Merge tag 'drm-misc-next-2018-11-28' of git://anongit.freedesktop.org/drm/drm-misc...
Author:     Dave Airlie <airlied@redhat.com>
AuthorDate: Thu, 29 Nov 2018 00:21:23 +0000 (10:21 +1000)
Commit:     Dave Airlie <airlied@redhat.com>
CommitDate: Thu, 29 Nov 2018 00:28:49 +0000 (10:28 +1000)
drm-misc-next for v4.21:

Core Changes:
- Merge drm_info.c into drm_debugfs.c
- Complete the fake drm_crtc_commit's hw_done/flip_done sooner.
- Remove deprecated drm_obj_ref/unref functions. All drivers use get/put now.
- Decrease stack use of drm_gem_prime_mmap.
- Improve documentation for dumb callbacks.

Driver Changes:
- Add EDID support to virtio.
- Wait on implicit fence in meson and sun4i.
- Add support for BGRX8888 to sun4i.
- Preparation patches for the sun4i driver to start supporting linear and tiled YUV formats.
- Add support for HDMI 1.4 4K modes to meson, and support for VIC alternate timings.
- Drop custom dumb_map in vkms.
- Small fixes and cleanups to v3d.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/151a3270-b1be-ed75-bd58-6b29d741f592@linux.intel.com
799 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-class-led-trigger-pattern
Documentation/admin-guide/pm/cpufreq.rst
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/devicetree/bindings/arm/shmobile.txt
Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
Documentation/devicetree/bindings/display/renesas,du.txt
Documentation/devicetree/bindings/i2c/i2c-omap.txt
Documentation/gpu/amdgpu-dc.rst [new file with mode: 0644]
Documentation/gpu/drivers.rst
Documentation/gpu/drm-mm.rst
Documentation/i2c/busses/i2c-nvidia-gpu [new file with mode: 0644]
Documentation/vm/unevictable-lru.rst
Documentation/x86/x86_64/mm.txt
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/termios.h
arch/alpha/include/uapi/asm/ioctls.h
arch/alpha/include/uapi/asm/termbits.h
arch/arm/boot/dts/imx53-ppd.dts
arch/arm/boot/dts/imx6sll.dtsi
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/vf610m4-colibri.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/proc-fns.h
arch/arm/kernel/bugs.c
arch/arm/kernel/head-common.S
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/mach-omap2/display.c
arch/arm/mm/proc-v7-bugs.c
arch/arm/mm/proc-v7.S
arch/arm/vfp/vfpmodule.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/boot/dts/renesas/r8a77980-condor.dts
arch/arm64/include/asm/processor.h
arch/arm64/kernel/setup.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/m68k/include/asm/pgtable_mm.h
arch/microblaze/include/asm/pgtable.h
arch/mips/cavium-octeon/executive/cvmx-helper.c
arch/mips/mm/dma-noncoherent.c
arch/nds32/include/asm/pgtable.h
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/spinlock.h
arch/parisc/kernel/syscall.S
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/trace.h
arch/powerpc/kvm/trace_booke.h
arch/powerpc/kvm/trace_hv.h
arch/powerpc/kvm/trace_pr.h
arch/powerpc/mm/numa.c
arch/powerpc/mm/slb.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/riscv/Makefile
arch/riscv/configs/defconfig
arch/riscv/include/asm/ptrace.h
arch/riscv/kernel/module.c
arch/riscv/lib/Makefile
arch/s390/Makefile
arch/s390/boot/compressed/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/entry.S
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vmlinux.lds.S
arch/s390/mm/pgalloc.c
arch/s390/numa/numa.c
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snb.c
arch/x86/include/asm/mce.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/qspinlock.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/ldt.c
arch/x86/kernel/vsmp_64.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/p2m.c
arch/x86/xen/spinlock.c
arch/xtensa/include/asm/processor.h
arch/xtensa/kernel/head.S
block/bio.c
block/blk-core.c
block/blk-lib.c
block/blk-merge.c
block/blk.h
block/bounce.c
crypto/crypto_user_base.c
crypto/crypto_user_stat.c
crypto/simd.c
drivers/acpi/Kconfig
drivers/acpi/nfit/core.c
drivers/acpi/nfit/mce.c
drivers/ata/libata-core.c
drivers/ata/sata_rcar.c
drivers/block/floppy.c
drivers/block/xen-blkfront.c
drivers/clk/clk-fixed-factor.c
drivers/clk/meson/axg.c
drivers/clk/meson/gxbb.c
drivers/clk/qcom/gcc-qcs404.c
drivers/clocksource/i8253.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/crypto/hisilicon/sec/sec_algs.c
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/memmap.c
drivers/firmware/efi/runtime-wrappers.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
drivers/gpu/drm/amd/amdkfd/cik_regs.h
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h [moved from drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h with 55% similarity]
drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c [deleted file]
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
drivers/gpu/drm/amd/display/dc/dm_services_types.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h [moved from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h with 63% similarity]
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_atomic_uapi.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_global.c [deleted file]
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_fixed.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_oa_bdw.c
drivers/gpu/drm/i915/i915_oa_bdw.h
drivers/gpu/drm/i915/i915_oa_bxt.c
drivers/gpu/drm/i915/i915_oa_bxt.h
drivers/gpu/drm/i915/i915_oa_cflgt2.c
drivers/gpu/drm/i915/i915_oa_cflgt2.h
drivers/gpu/drm/i915/i915_oa_cflgt3.c
drivers/gpu/drm/i915/i915_oa_cflgt3.h
drivers/gpu/drm/i915/i915_oa_chv.c
drivers/gpu/drm/i915/i915_oa_chv.h
drivers/gpu/drm/i915/i915_oa_cnl.c
drivers/gpu/drm/i915/i915_oa_cnl.h
drivers/gpu/drm/i915/i915_oa_glk.c
drivers/gpu/drm/i915/i915_oa_glk.h
drivers/gpu/drm/i915/i915_oa_hsw.c
drivers/gpu/drm/i915/i915_oa_hsw.h
drivers/gpu/drm/i915/i915_oa_icl.c
drivers/gpu/drm/i915/i915_oa_icl.h
drivers/gpu/drm/i915/i915_oa_kblgt2.c
drivers/gpu/drm/i915/i915_oa_kblgt2.h
drivers/gpu/drm/i915/i915_oa_kblgt3.c
drivers/gpu/drm/i915/i915_oa_kblgt3.h
drivers/gpu/drm/i915/i915_oa_sklgt2.c
drivers/gpu/drm/i915/i915_oa_sklgt2.h
drivers/gpu/drm/i915/i915_oa_sklgt3.c
drivers/gpu/drm/i915/i915_oa_sklgt3.h
drivers/gpu/drm/i915/i915_oa_sklgt4.c
drivers/gpu/drm/i915/i915_oa_sklgt4.h
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_scheduler.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_scheduler.h
drivers/gpu/drm/i915/i915_syncmap.c
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/icl_dsi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_combo_phy.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_connector.c [moved from drivers/gpu/drm/i915/intel_modes.c with 54% similarity]
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fw.c
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_reg.h
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_hdcp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_opregion.h
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_quirks.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc_fw.h
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_vbt_defs.h
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/intel_guc.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_legacy_tv.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/v3d/v3d_sched.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ttm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-alps.c
drivers/hid/hid-asus.c
drivers/hid/hid-ids.h
drivers/hid/hid-quirks.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/hwmon.c
drivers/hwmon/ibmpowernv.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-nvidia-gpu.c [new file with mode: 0644]
drivers/i2c/busses/i2c-qcom-geni.c
drivers/leds/trigger/ledtrig-pattern.c
drivers/mtd/devices/Kconfig
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/atheros/alx/alx.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/fddi/defza.c
drivers/net/fddi/defza.h
drivers/net/phy/broadcom.c
drivers/net/phy/realtek.c
drivers/net/usb/smsc95xx.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/target/core.c
drivers/nvme/target/rdma.c
drivers/of/device.c
drivers/of/of_numa.c
drivers/pci/pci-acpi.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/meson/pinctrl-meson8.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/rtc/hctosys.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-pcf2127.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/Kconfig
drivers/scsi/NCR5380.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/myrb.c
drivers/scsi/myrs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/ufs/ufshcd.c
drivers/staging/vboxvideo/vbox_drv.h
drivers/staging/vboxvideo/vbox_ttm.c
drivers/target/target_core_transport.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_baudrate.c
drivers/tty/vt/vt.c
drivers/usb/typec/ucsi/Kconfig
drivers/usb/typec/ucsi/Makefile
drivers/usb/typec/ucsi/ucsi_ccg.c [new file with mode: 0644]
drivers/xen/grant-table.c
drivers/xen/privcmd-buf.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/super.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/ceph/quota.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/bmap.c
fs/gfs2/rgrp.c
fs/inode.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/delegation.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4proc.c
fs/notify/fanotify/fanotify.c
fs/notify/fsnotify.c
fs/ocfs2/aops.c
fs/ocfs2/cluster/masklog.h
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_message.c
include/asm-generic/4level-fixup.h
include/asm-generic/5level-fixup.h
include/asm-generic/pgtable-nop4d-hack.h
include/asm-generic/pgtable-nop4d.h
include/asm-generic/pgtable-nopmd.h
include/asm-generic/pgtable-nopud.h
include/asm-generic/pgtable.h
include/drm/drmP.h
include/drm/drm_connector.h
include/drm/drm_dp_helper.h
include/drm/drm_global.h [deleted file]
include/drm/drm_hdcp.h
include/drm/gpu_scheduler.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_memory.h
include/linux/ceph/ceph_features.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/efi.h
include/linux/hid.h
include/linux/i8253.h
include/linux/mm.h
include/linux/mtd/nand.h
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_comment.h
include/linux/nmi.h
include/linux/swap.h
include/net/addrconf.h
include/net/if_inet6.h
include/net/netfilter/nf_conntrack_l4proto.h
include/trace/events/kyber.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter_bridge.h
include/uapi/linux/sctp.h
include/xen/xen-ops.h
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/debug/kdb/kdb_bt.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_keyboard.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/debug/kdb/kdb_support.c
kernel/resource.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/psi.c
kernel/time/posix-cpu-timers.c
kernel/trace/trace_probe.c
kernel/user_namespace.c
lib/raid6/test/Makefile
lib/ubsan.c
mm/gup.c
mm/hugetlb.c
mm/memblock.c
mm/page_alloc.c
mm/shmem.c
mm/swapfile.c
mm/vmscan.c
mm/vmstat.c
mm/z3fold.c
net/core/dev.c
net/core/flow_dissector.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_sockglue.c
net/ipv6/af_inet6.c
net/ipv6/anycast.c
net/ipv6/ip6_fib.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_compat.c
net/netfilter/nft_numgen.c
net/netfilter/nft_osf.c
net/netfilter/xt_IDLETIMER.c
net/openvswitch/conntrack.c
net/rxrpc/ar-internal.h
net/rxrpc/call_event.c
net/rxrpc/output.c
net/sched/act_mirred.c
net/sched/cls_flower.c
net/sched/sch_netem.c
net/sctp/outqueue.c
net/sunrpc/auth_generic.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/xdr.c
net/tipc/link.c
scripts/faddr2line
scripts/kconfig/merge_config.sh
scripts/package/builddeb
scripts/package/mkdebian
scripts/package/mkspec
scripts/setlocalversion
scripts/spdxcheck.py
security/integrity/digsig_asymmetric.c
security/selinux/hooks.c
security/selinux/ss/mls.c
sound/pci/hda/thinkpad_helper.c
sound/x86/intel_hdmi_audio.c
tools/arch/arm64/include/asm/barrier.h
tools/perf/Documentation/perf-list.txt
tools/perf/Makefile.perf
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/examples/bpf/augmented_raw_syscalls.c [new file with mode: 0644]
tools/perf/jvmti/jvmti_agent.c
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/attr/test-record-group-sampling
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-log.c
tools/perf/util/intel-pt-decoder/intel-pt-log.h
tools/perf/util/intel-pt.c
tools/perf/util/pmu.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/powerpc/mm/wild_bctr.c

index a76be45fef6ca5b2d23139304ff5ea338bdd1d07..28fecafa65069c1af077453a4159a6a290949982 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
+Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 5befd2d714d0037548bed049a979dc4fcee1d300..84cbec4c62115adc38b976504b04799bb6fb0469 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
 D: Soundblaster driver fixes, ISAPnP quirk
 S: California, USA
 
+N: Jarkko Lavinen
+E: jarkko.lavinen@nokia.com
+D: OMAP MMC support
+
 N: Jonathan Layes
 D: ARPD support
 
index fb3d1e03b8819bb950e961172d27dac3a3191129..1e5d172e064624d96216eae51a4be60c02a29979 100644 (file)
@@ -37,8 +37,8 @@ Description:
                  0-|   /             \/             \/
                    +---0----1----2----3----4----5----6------------> time (s)
 
-               2. To make the LED go instantly from one brigntess value to another,
-               we should use use zero-time lengths (the brightness must be same as
+               2. To make the LED go instantly from one brightness value to another,
+               we should use zero-time lengths (the brightness must be same as
                the previous tuple's). So the format should be:
                "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
                brightness_2 0 ...". For example:
index 47153e64dfb530465ca01d28272e058293eb08b5..7eca9026a9ed2c3ed2a35b7e2184660e8caa9fdf 100644 (file)
@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
 a governor ``sysfs`` interface to it.  Next, the governor is started by
 invoking its ``->start()`` callback.
 
-That callback it expected to register per-CPU utilization update callbacks for
+That callback is expected to register per-CPU utilization update callbacks for
 all of the online CPUs belonging to the given policy with the CPU scheduler.
 The utilization update callbacks will be invoked by the CPU scheduler on
 important events, like task enqueue and dequeue, on every iteration of the
index a873855c811d63f3a47cd2ec830404abb89d48c8..14378cecb1723f7d9b4b4f3b0b1ccd0ca3126c26 100644 (file)
@@ -86,9 +86,11 @@ transitions.
 This will give a fine grained information about all the CPU frequency
 transitions. The cat output here is a two dimensional matrix, where an entry
 <i,j> (row i, column j) represents the count of number of transitions from 
-Freq_i to Freq_j. Freq_i is in descending order with increasing rows and 
-Freq_j is in descending order with increasing columns. The output here also 
-contains the actual freq values for each row and column for better readability.
+Freq_i to Freq_j. Freq_i rows and Freq_j columns follow the sorting order in
+which the driver has provided the frequency table initially to the cpufreq core
+and so can be sorted (ascending or descending) or unsorted.  The output here
+also contains the actual freq values for each row and column for better
+readability.
 
 If the transition table is bigger than PAGE_SIZE, reading this will
 return an -EFBIG error.
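
A short sketch of reading that matrix from userspace (standard cpufreq-stats
sysfs path, with cpu0 chosen as an example; the error handling follows the
-EFBIG note above):

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/stats/trans_table", "r");
            char line[512];

            if (!f) {
                    perror("trans_table");
                    return 1;
            }
            /* Rows/columns follow the driver-provided frequency table
             * order, so they may be ascending, descending, or unsorted. */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            if (ferror(f) && errno == EFBIG)
                    fputs("transition table larger than PAGE_SIZE\n", stderr);
            fclose(f);
            return 0;
    }
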
index f5e0f82fd5031efb1570361eabf2d096769ef7f5..58c4256d37a39e5082cdb5f354548dbe0cf160d6 100644 (file)
@@ -27,7 +27,7 @@ SoCs:
     compatible = "renesas,r8a77470"
   - RZ/G2M (R8A774A1)
     compatible = "renesas,r8a774a1"
-  - RZ/G2E (RA8774C0)
+  - RZ/G2E (R8A774C0)
     compatible = "renesas,r8a774c0"
   - R-Car M1A (R8A77781)
     compatible = "renesas,r8a7778"
diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
deleted file mode 100644 (file)
index 2aa06ac..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-Generic ARM big LITTLE cpufreq driver's DT glue
------------------------------------------------
-
-This is DT specific glue layer for generic cpufreq driver for big LITTLE
-systems.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
-
-FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
-must be present contiguously. Generic DT driver will check only node 'x' for
-cpu:x.
-
-Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
-  for details
-
-Optional properties:
-- clock-latency: Specify the possible maximum transition latency for clock,
-  in unit of nanoseconds.
-
-Examples:
-
-cpus {
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       cpu@0 {
-               compatible = "arm,cortex-a15";
-               reg = <0>;
-               next-level-cache = <&L2>;
-               operating-points = <
-                       /* kHz    uV */
-                       792000  1100000
-                       396000  950000
-                       198000  850000
-               >;
-               clock-latency = <61036>; /* two CLK32 periods */
-       };
-
-       cpu@1 {
-               compatible = "arm,cortex-a15";
-               reg = <1>;
-               next-level-cache = <&L2>;
-       };
-
-       cpu@100 {
-               compatible = "arm,cortex-a7";
-               reg = <100>;
-               next-level-cache = <&L2>;
-               operating-points = <
-                       /* kHz    uV */
-                       792000  950000
-                       396000  750000
-                       198000  450000
-               >;
-               clock-latency = <61036>; /* two CLK32 periods */
-       };
-
-       cpu@101 {
-               compatible = "arm,cortex-a7";
-               reg = <101>;
-               next-level-cache = <&L2>;
-       };
-};
index 3aeb0ec06fd02421bcb412fdfbca08747476b904..ba5469dd09f35393e339af8000d6ca776d1c2f31 100644 (file)
@@ -13,6 +13,7 @@ Required properties:
   - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
   - "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders
   - "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders
+  - "renesas,r8a77965-lvds" for R8A77965 (R-Car M3-N) compatible LVDS encoders
   - "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders
   - "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders
   - "renesas,r8a77990-lvds" for R8A77990 (R-Car E3) compatible LVDS encoders
index 9de67be632d1a7a42bbd765e5fa3cbd07b9a01f5..3c855d9f27193bcb03f365cc8c4a3a78a5d478a0 100644 (file)
@@ -4,7 +4,9 @@ Required Properties:
 
   - compatible: must be one of the following.
     - "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
+    - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
+    - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -52,7 +54,9 @@ corresponding to each DU output.
                         Port0          Port1          Port2          Port3
 -----------------------------------------------------------------------------
  R8A7743 (RZ/G1M)       DPAD 0         LVDS 0         -              -
+ R8A7744 (RZ/G1N)       DPAD 0         LVDS 0         -              -
  R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
+ R8A77470 (RZ/G1C)      DPAD 0         DPAD 1         LVDS 0         -
  R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
  R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
  R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
index 7e49839d41249ca5168b0de1ea02781a2798486d..4b90ba9f31b70b712c285af7ef7f7be180b39b14 100644 (file)
@@ -1,8 +1,12 @@
 I2C for OMAP platforms
 
 Required properties :
-- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
-  or "ti,omap4-i2c"
+- compatible : Must be
+       "ti,omap2420-i2c" for OMAP2420 SoCs
+       "ti,omap2430-i2c" for OMAP2430 SoCs
+       "ti,omap3-i2c" for OMAP3 SoCs
+       "ti,omap4-i2c" for OMAP4+ SoCs
+       "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
 - ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
 - #address-cells = <1>;
 - #size-cells = <0>;
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
new file mode 100644 (file)
index 0000000..cc89b0f
--- /dev/null
@@ -0,0 +1,68 @@
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+*placeholder - general description of supported platforms, what dc is, etc.*
+
+Because it is partially shared with other operating systems, the Display Core
+Driver is divided in two pieces.
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+   hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+   amdgpu base driver and DRM are implemented here.
+
+It doesn't help that the entire package is frequently referred to as DC. But
+with the context in mind, it should be clear.
+
+When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
+supported ASICs. To force disable, set `amdgpu.dc=0` on kernel command line.
+Likewise, to force enable on unsupported ASICs, set `amdgpu.dc=1`.
+
+To determine if DC is loaded, search dmesg for the following entry:
+
+``Display Core initialized with <version number here>``
+
+AMDgpu Display Manager
+======================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+   :internal:
+
+Lifecycle
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
+
+Display Core
+============
+
+**WIP**
index 7d2d3875ff1a0cf17f463410df0e0acdab2f88b6..7c1672118a73f4c59f066ef78fa95badb6a1ce43 100644 (file)
@@ -5,6 +5,7 @@ GPU Driver Documentation
 .. toctree::
 
    amdgpu
+   amdgpu-dc
    i915
    meson
    pl111
index e725e8449e7257212490547f4afd6924695b9632..d0f3c6b032009d6a516cd409611e86b9d29dd4f5 100644 (file)
@@ -72,8 +72,8 @@ object TTM to provide a pool for buffer object allocation by clients and
 the kernel itself. The type of this object should be
 TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
 ttm_bo_global). Again, driver-specific init and release functions may
-be provided, likely eventually calling ttm_bo_global_init() and
-ttm_bo_global_release(), respectively. Also, like the previous
+be provided, likely eventually calling ttm_bo_global_ref_init() and
+ttm_bo_global_ref_release(), respectively. Also, like the previous
 object, ttm_global_item_ref() is used to create an initial reference
 count for the TTM, which will call your initialization function.
 
diff --git a/Documentation/i2c/busses/i2c-nvidia-gpu b/Documentation/i2c/busses/i2c-nvidia-gpu
new file mode 100644 (file)
index 0000000..31884d2
--- /dev/null
@@ -0,0 +1,18 @@
+Kernel driver i2c-nvidia-gpu
+
+Datasheet: not publicly available.
+
+Authors:
+       Ajay Gupta <ajayg@nvidia.com>
+
+Description
+-----------
+
+i2c-nvidia-gpu is a driver for I2C controller included in NVIDIA Turing
+and later GPUs and it is used to communicate with Type-C controller on GPUs.
+
+If your 'lspci -v' listing shows something like the following,
+
+01:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
+
+then this driver should support the I2C controller of your GPU.
index fdd84cb8d511f4aef71f18f281ba629ad025d17f..b8e29f977f2d1f3a2898444bc9720c39ab9eaa4f 100644 (file)
@@ -143,7 +143,7 @@ using a number of wrapper functions:
        Query the address space, and return true if it is completely
        unevictable.
 
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
 
  (1) By ramfs to mark the address spaces of its inodes when they are created,
      and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
 
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+     amount of unevictable memory marked by i915 driver is roughly the bounded
+     object size in debugfs/dri/0/i915_gem_objects.
+
 
 Detecting Unevictable Pages
 ---------------------------
index 73aaaa3da4369e39b41bf360b24b820061a15df9..804f9426ed17bdcf0c8fb6dc682ae9254050beb9 100644 (file)
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ffff800000000000 | -128    TB | ffff87ffffffffff |    8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120    TB | ffffc7ffffffffff |   64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 |  -56    TB | ffffc8ffffffffff |    1 TB | ... unused hole
+ ffff880000000000 | -120    TB | ffff887fffffffff |  0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5  TB | ffffc87fffffffff |   64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 |  -55.5  TB | ffffc8ffffffffff |  0.5 TB | ... unused hole
  ffffc90000000000 |  -55    TB | ffffe8ffffffffff |   32 TB | vmalloc/ioremap space (vmalloc_base)
  ffffe90000000000 |  -23    TB | ffffe9ffffffffff |    1 TB | ... unused hole
  ffffea0000000000 |  -22    TB | ffffeaffffffffff |    1 TB | virtual memory map (vmemmap_base)
  ffffeb0000000000 |  -21    TB | ffffebffffffffff |    1 TB | ... unused hole
  ffffec0000000000 |  -20    TB | fffffbffffffffff |   16 TB | KASAN shadow memory
- fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
-                  |            |                  |         | vaddr_end for KASLR
- fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
- fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | LDT remap for PTI
- ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                             |
-                                                            | Identical layout to the 47-bit one from here on:
+                                                            | Identical layout to the 56-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
+                  |            |                  |         | vaddr_end for KASLR
+ fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
+ ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
  ffffff8000000000 | -512    GB | ffffffeeffffffff |  444 GB | ... unused hole
  ffffffef00000000 |  -68    GB | fffffffeffffffff |   64 GB | EFI region mapping space
  ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | ... unused hole
@@ -83,7 +84,7 @@ Notes:
 __________________|____________|__________________|_________|___________________________________________________________
                   |            |                  |         |
  0000800000000000 |  +64    PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
-                  |            |                  |         |     virtual memory addresses up to the -128 TB
+                  |            |                  |         |     virtual memory addresses up to the -64 PB
                   |            |                  |         |     starting offset of kernel mappings.
 __________________|____________|__________________|_________|___________________________________________________________
                                                             |
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ff00000000000000 |  -64    PB | ff0fffffffffffff |    4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 |  -60    PB | ff8fffffffffffff |   32 PB | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 |  -28    PB | ff9fffffffffffff |    4 PB | LDT remap for PTI
+ ff10000000000000 |  -60    PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 |  -59.75 PB | ff90ffffffffffff |   32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 |  -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
  ffa0000000000000 |  -24    PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
  ffd2000000000000 |  -11.5  PB | ffd3ffffffffffff |  0.5 PB | ... unused hole
  ffd4000000000000 |  -11    PB | ffd5ffffffffffff |  0.5 PB | virtual memory map (vmemmap_base)
  ffd6000000000000 |  -10.5  PB | ffdeffffffffffff | 2.25 PB | ... unused hole
  ffdf000000000000 |   -8.25 PB | fffffdffffffffff |   ~8 PB | KASAN shadow memory
- fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
-                  |            |                  |         | vaddr_end for KASLR
- fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
- fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
- ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                             |
                                                             | Identical layout to the 47-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
+                  |            |                  |         | vaddr_end for KASLR
+ fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
+ ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
  ffffff8000000000 | -512    GB | ffffffeeffffffff |  444 GB | ... unused hole
  ffffffef00000000 |  -68    GB | fffffffeffffffff |   64 GB | EFI region mapping space
  ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | ... unused hole
index 97b7adbceda4828ab217301a305a3b20892d0603..68aed077f7b62ed0e70315bde602bfbec50a78bf 100644 (file)
@@ -25,7 +25,7 @@ Offset        Proto   Name            Meaning
 0C8/004        ALL     ext_cmd_line_ptr  cmd_line_ptr high 32bits
 140/080        ALL     edid_info       Video mode setup (struct edid_info)
 1C0/020        ALL     efi_info        EFI 32 information (struct efi_info)
-1E0/004        ALL     alk_mem_k       Alternative mem check, in KB
+1E0/004        ALL     alt_mem_k       Alternative mem check, in KB
 1E4/004        ALL     scratch         Scratch field for the kernel setup code
 1E8/001        ALL     e820_entries    Number of entries in e820_table (below)
 1E9/001        ALL     eddbuf_entries  Number of entries in eddbuf (below)
index 4caac2f6b01d4ae55e99fb08504cd5b743b897c5..1026150ae90fc80e26687186adc7ef02e08e4f47 100644 (file)
@@ -6620,9 +6620,9 @@ F:        arch/*/include/asm/suspend*.h
 
 HID CORE LAYER
 M:     Jiri Kosina <jikos@kernel.org>
-R:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
 L:     linux-input@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
 S:     Maintained
 F:     drivers/hid/
 F:     include/linux/hid*
@@ -6874,6 +6874,13 @@ L:       linux-acpi@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-core-acpi.c
 
+I2C CONTROLLER DRIVER FOR NVIDIA GPU
+M:     Ajay Gupta <ajayg@nvidia.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/i2c/busses/i2c-nvidia-gpu
+F:     drivers/i2c/busses/i2c-nvidia-gpu.c
+
 I2C MUXES
 M:     Peter Rosin <peda@axentia.se>
 L:     linux-i2c@vger.kernel.org
@@ -8380,7 +8387,7 @@ F:        drivers/media/dvb-frontends/lgdt3305.*
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M:     Viresh Kumar <vireshk@kernel.org>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     include/linux/pata_arasan_cf_data.h
 F:     drivers/ata/pata_arasan_cf.c
@@ -8397,7 +8404,7 @@ F:        drivers/ata/ata_generic.c
 LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M:     Linus Walleij <linus.walleij@linaro.org>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/pata_ftide010.c
 F:     drivers/ata/sata_gemini.c
@@ -8416,7 +8423,7 @@ F:        include/linux/ahci_platform.h
 LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M:     Mikael Pettersson <mikpelinux@gmail.com>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/sata_promise.*
 
@@ -10797,6 +10804,14 @@ L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/mach-omap2/omap_hwmod.*
 
+OMAP I2C DRIVER
+M:     Vignesh R <vigneshr@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F:     drivers/i2c/busses/i2c-omap.c
+
 OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
@@ -10806,9 +10821,9 @@ F:      drivers/media/platform/omap3isp/
 F:     drivers/staging/media/omap4iss/
 
 OMAP MMC SUPPORT
-M:     Jarkko Lavinen <jarkko.lavinen@nokia.com>
+M:     Aaro Koskinen <aaro.koskinen@iki.fi>
 L:     linux-omap@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     drivers/mmc/host/omap.c
 
 OMAP POWER MANAGEMENT SUPPORT
@@ -11743,6 +11758,7 @@ F:      Documentation/devicetree/bindings/pinctrl/fsl,*
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 S:     Maintained
 F:     drivers/pinctrl/intel/
 
@@ -15449,9 +15465,9 @@ F:      include/linux/usb/gadget*
 
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M:     Jiri Kosina <jikos@kernel.org>
-R:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
 L:     linux-usb@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
 S:     Maintained
 F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
index 9fce8b91c15f6055f534eb18432e706a5ac3b09c..ddbf627cad8f5fd8a2d0d06a913294d88a0ff878 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = "People's Front"
 
 # *DOCUMENTATION*
index 6a8c53dec57e6e3aa22a5be371b922ebb1bd154d..b7c77bb1bfd20368a8ff95a93d5493353e58023a 100644 (file)
 })
 
 #define user_termios_to_kernel_termios(k, u) \
-       copy_from_user(k, u, sizeof(struct termios))
+       copy_from_user(k, u, sizeof(struct termios2))
 
 #define kernel_termios_to_user_termios(u, k) \
+       copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+       copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
        copy_to_user(u, k, sizeof(struct termios))
 
 #endif /* _ALPHA_TERMIOS_H */
index 1e9121c9b3c74c16d129ce6fac97f614080dca94..971311605288faea94b19d23d0b346361a11a6a9 100644 (file)
 #define TCXONC         _IO('t', 30)
 #define TCFLSH         _IO('t', 31)
 
+#define TCGETS2                _IOR('T', 42, struct termios2)
+#define TCSETS2                _IOW('T', 43, struct termios2)
+#define TCSETSW2       _IOW('T', 44, struct termios2)
+#define TCSETSF2       _IOW('T', 45, struct termios2)
+
 #define TIOCSWINSZ     _IOW('t', 103, struct winsize)
 #define TIOCGWINSZ     _IOR('t', 104, struct winsize)
 #define        TIOCSTART       _IO('t', 110)           /* start output, like ^Q */
index de6c8360fbe3657e3ddf7cd6bb648a3d8b0fdb71..4575ba34a0eaeecb9b17cb9f3b6b18a698bafdfb 100644 (file)
@@ -26,6 +26,19 @@ struct termios {
        speed_t c_ospeed;               /* output speed */
 };
 
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+       tcflag_t c_iflag;               /* input mode flags */
+       tcflag_t c_oflag;               /* output mode flags */
+       tcflag_t c_cflag;               /* control mode flags */
+       tcflag_t c_lflag;               /* local mode flags */
+       cc_t c_cc[NCCS];                /* control characters */
+       cc_t c_line;                    /* line discipline (== c_cc[19]) */
+       speed_t c_ispeed;               /* input speed */
+       speed_t c_ospeed;               /* output speed */
+};
+
 /* Alpha has matching termios and ktermios */
 
 struct ktermios {
@@ -152,6 +165,7 @@ struct ktermios {
 #define B3000000  00034
 #define B3500000  00035
 #define B4000000  00036
+#define BOTHER    00037
 
 #define CSIZE  00001400
 #define   CS5  00000000
@@ -169,6 +183,9 @@ struct ktermios {
 #define CMSPAR   010000000000          /* mark or space (stick) parity */
 #define CRTSCTS          020000000000          /* flow control */
 
+#define CIBAUD 07600000
+#define IBSHIFT        16
+
 /* c_lflag bits */
 #define ISIG   0x00000080
 #define ICANON 0x00000100
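Editor's note: taken together, struct termios2, the TCGETS2/TCSETS2 ioctls and the BOTHER/CIBAUD bits added above give Alpha the same arbitrary-baud-rate interface the other architectures already expose. A minimal userspace sketch, assuming <asm/termbits.h> provides these definitions as on other Linux ports (set_custom_baud is an invented helper name for illustration):

    /* Hedged sketch: request a non-standard baud rate via termios2.
     * Input and output speed are set to the same value; error handling
     * is reduced to a single return code. */
    #include <sys/ioctl.h>
    #include <asm/termbits.h>       /* struct termios2, TCGETS2, BOTHER */

    static int set_custom_baud(int fd, int baud)
    {
            struct termios2 tio;

            if (ioctl(fd, TCGETS2, &tio))
                    return -1;
            tio.c_cflag &= ~CBAUD;          /* drop the Bxxx rate constant */
            tio.c_cflag |= BOTHER;          /* use the explicit speed fields */
            tio.c_ispeed = baud;
            tio.c_ospeed = baud;
            return ioctl(fd, TCSETS2, &tio);
    }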
index b560ff88459bf1b74a0c093f3bdc15d2118c0182..5ff9a179c83c3326ab2dec5fdceee155021bf716 100644 (file)
@@ -55,7 +55,7 @@
        };
 
        chosen {
-               stdout-path = "&uart1:115200n8";
+               stdout-path = "serial0:115200n8";
        };
 
        memory@70000000 {
index ed9a980bce8501fcca0c3d357a8440cb8debd59d..beefa1b2049d7b56476a62429c2b4e4abf65ad33 100644 (file)
                        i2c1: i2c@21a0000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
+                               compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
                                reg = <0x021a0000 0x4000>;
                                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SLL_CLK_I2C1>;
index 53b3408b5fab1845248b2b7ff078eba462f730ab..7d7d679945d28efe4f827e1b6197001088826b7f 100644 (file)
                regulator-name = "enet_3v3";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+               gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
+               regulator-boot-on;
+               regulator-always-on;
        };
 
        reg_pcie_gpio: regulator-pcie-gpio {
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
        phy-handle = <&ethphy1>;
+       phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
        status = "okay";
 
        mdio {
                                MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3   0x3081
                                MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN    0x3081
                                MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M       0x91
+                               /* phy reset */
+                               MX6SX_PAD_ENET2_CRS__GPIO2_IO_7         0x10b0
                        >;
                };
 
index 41ec66a969907d492dabec284e6a101f592e0216..ca62495587602f44d3e514fb2df910edfc584ea1 100644 (file)
@@ -50,8 +50,8 @@
        compatible = "fsl,vf610m4";
 
        chosen {
-               bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
-               stdout-path = "&uart2";
+               bootargs = "clk_ignore_unused init=/linuxrc rw";
+               stdout-path = "serial2:115200";
        };
 
        memory@8c000000 {
index 1c7616815a86ab80fa81ffacbb509a2408554cfa..63af6234c1b69a20b763470a8a73a989e1d7f747 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
 CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EMBEDDED=y
index 0d289240b6ca110ab961a280ddd20fc1c567f2a4..775cac3c02bb0a31facb970e16feef83f86c6632 100644 (file)
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)                                                        \
index 92fd2c8a9af0638834d6c2b5814b9a88911f33fe..12659ce5c1f38e2f166937b18957c4fbf5732c3d 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef _ASM_PGTABLE_2LEVEL_H
 #define _ASM_PGTABLE_2LEVEL_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Hardware-wise, we have a two level page table structure, where the first
index e25f4392e1b2868446de858701d408aaaee26eab..e1b6f280ab088fb0b8ac59b6ceb3543606c97e01 100644 (file)
@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
        /* MISC
         * get data abort address/flags
         */
@@ -79,9 +79,13 @@ extern struct processor {
        unsigned int suspend_size;
        void (*do_suspend)(void *);
        void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init                  processor._proc_init
-#define cpu_proc_fin                   processor._proc_fin
-#define cpu_reset                      processor.reset
-#define cpu_do_idle                    processor._do_idle
-#define cpu_dcache_clean_area          processor.dcache_clean_area
-#define cpu_set_pte_ext                        processor.set_pte_ext
-#define cpu_do_switch_mm               processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend                 processor.do_suspend
-#define cpu_do_resume                  processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)                 cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)                  cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       unsigned int cpu = smp_processor_id();
+       *cpu_vtable[cpu] = *p;
+       WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+                    cpu_vtable[0]->dcache_clean_area);
+       WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+                    cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)                 processor.f
+#define PROC_TABLE(f)                  processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       processor = *p;
+}
+#endif
+
+#define cpu_proc_init                  PROC_VTABLE(_proc_init)
+#define cpu_check_bugs                 PROC_VTABLE(check_bugs)
+#define cpu_proc_fin                   PROC_VTABLE(_proc_fin)
+#define cpu_reset                      PROC_VTABLE(reset)
+#define cpu_do_idle                    PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area          PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext                        PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm               PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend                 PROC_VTABLE(do_suspend)
+#define cpu_do_resume                  PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);
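Editor's note: with these definitions the cpu_* accessor names behave like lvalues into the current CPU's private vtable, which is what the Spectre workaround code relies on. For instance, the assignment made later in proc-v7-bugs.c expands roughly as follows on a big.LITTLE + HARDEN_BRANCH_PREDICTOR build (illustrative expansion, not literal preprocessor output):

    /* cpu_do_switch_mm = cpu_v7_hvc_switch_mm;  becomes, in effect: */
    cpu_vtable[smp_processor_id()]->switch_mm = cpu_v7_hvc_switch_mm;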
index 7be5113101915cd81a5558f45238041138fb5a58..d41d3598e5e541115c08f9b81b26fd187a7fe7af 100644 (file)
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-       if (processor.check_bugs)
-               processor.check_bugs();
+       if (cpu_check_bugs)
+               cpu_check_bugs();
 #endif
 }
 
index 6e0375e7db055bc82cf0674b37b74646e2d64ff0..997b02302c3145f5ac380ae18823eba50d916ac7 100644 (file)
@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
+       __FINIT
+       .text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
        ldmfd   sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-       __FINIT
-       .text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
index ac7e08886863cfa74855e5b91c4f436e85da1e0a..375b13f7e780663eddb3f04e632751064a6b5bfd 100644 (file)
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+       [0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * Locate the processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-       struct proc_info_list *list;
+       struct proc_info_list *list = lookup_processor_type(midr);
 
-       /*
-        * locate processor in the list of supported processor
-        * types.  The linker builds this table for us from the
-        * entries in arch/arm/mm/proc-*.S
-        */
-       list = lookup_processor_type(read_cpuid_id());
        if (!list) {
-               pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-                      read_cpuid_id());
-               while (1);
+               pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+                      smp_processor_id(), midr);
+               while (1)
+               /* can't use cpu_relax() here as it may require MMU setup */;
        }
 
+       return list;
+}
+
+static void __init setup_processor(void)
+{
+       unsigned int midr = read_cpuid_id();
+       struct proc_info_list *list = lookup_processor(midr);
+
        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-       processor = *list->proc;
-#endif
+       init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif
 
        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+               list->cpu_name, midr, midr & 15,
                proc_arch[cpu_architecture()], get_cr());
 
        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
index 0978282d5fc27a7c4a5e6b0e274da8bfc4c14c8d..12a6172263c0b057a94f2041accf581088374fb0 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       if (!cpu_vtable[cpu])
+               cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+       return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+       init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;
 
+       ret = secondary_biglittle_prepare(cpu);
+       if (ret)
+               return ret;
+
        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;
 
+       secondary_biglittle_init();
+
        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
index 9500b6e2738019a4fb53e50c8150a2972ca8c391..f86b72d1d59e51f4af15319df87ee61141b4fd02 100644 (file)
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
 
        return 0;
 }
-#else
-static inline int omapdss_init_fbdev(void)
+
+static const char * const omapdss_compat_names[] __initconst = {
+       "ti,omap2-dss",
+       "ti,omap3-dss",
+       "ti,omap4-dss",
+       "ti,omap5-dss",
+       "ti,dra7-dss",
+};
+
+static struct device_node * __init omapdss_find_dss_of_node(void)
 {
-       return 0;
+       struct device_node *node;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
+               node = of_find_compatible_node(NULL, NULL,
+                       omapdss_compat_names[i]);
+               if (node)
+                       return node;
+       }
+
+       return NULL;
 }
+
+static int __init omapdss_init_of(void)
+{
+       int r;
+       struct device_node *node;
+       struct platform_device *pdev;
+
+       /* only create dss helper devices if dss is enabled in the .dts */
+
+       node = omapdss_find_dss_of_node();
+       if (!node)
+               return 0;
+
+       if (!of_device_is_available(node))
+               return 0;
+
+       pdev = of_find_device_by_node(node);
+
+       if (!pdev) {
+               pr_err("Unable to find DSS platform device\n");
+               return -ENODEV;
+       }
+
+       r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+       if (r) {
+               pr_err("Unable to populate DSS submodule devices\n");
+               return r;
+       }
+
+       return omapdss_init_fbdev();
+}
+omap_device_initcall(omapdss_init_of);
 #endif /* CONFIG_FB_OMAP2 */
 
 static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
 
        return r;
 }
-
-static const char * const omapdss_compat_names[] __initconst = {
-       "ti,omap2-dss",
-       "ti,omap3-dss",
-       "ti,omap4-dss",
-       "ti,omap5-dss",
-       "ti,dra7-dss",
-};
-
-static struct device_node * __init omapdss_find_dss_of_node(void)
-{
-       struct device_node *node;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
-               node = of_find_compatible_node(NULL, NULL,
-                       omapdss_compat_names[i]);
-               if (node)
-                       return node;
-       }
-
-       return NULL;
-}
-
-static int __init omapdss_init_of(void)
-{
-       int r;
-       struct device_node *node;
-       struct platform_device *pdev;
-
-       /* only create dss helper devices if dss is enabled in the .dts */
-
-       node = omapdss_find_dss_of_node();
-       if (!node)
-               return 0;
-
-       if (!of_device_is_available(node))
-               return 0;
-
-       pdev = of_find_device_by_node(node);
-
-       if (!pdev) {
-               pr_err("Unable to find DSS platform device\n");
-               return -ENODEV;
-       }
-
-       r = of_platform_populate(node, NULL, NULL, &pdev->dev);
-       if (r) {
-               pr_err("Unable to populate DSS submodule devices\n");
-               return r;
-       }
-
-       return omapdss_init_fbdev();
-}
-omap_device_initcall(omapdss_init_of);
index 5544b82a2e7a553d015e23d77a9017682dd91f11..9a07916af8dd27dd021781c06451340ce6d03032 100644 (file)
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_bpiall;
                spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_iciallu;
                spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_hvc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_hvc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
                        spectre_v2_method = "hypervisor";
                        break;
 
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_smc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_smc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
                        spectre_v2_method = "firmware";
                        break;
 
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
        if (spectre_v2_method)
                pr_info("CPU%u: Spectre v2: using %s workaround\n",
                        smp_processor_id(), spectre_v2_method);
-       return;
-
-bl_error:
-       pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-               cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
index 6fe52819e0148c6f3f04b11c75e278cd0b04a1f9..339eb17c9808e2c04a043485e42e5d29a49de347 100644 (file)
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
        hvc     #0
        ldmfd   sp!, {r0 - r3}
        b       cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
 #endif
 ENTRY(cpu_v7_iciallu_switch_mm)
        mov     r3, #0
index aff6e6eadc700f08241668e66c1235ea825217ce..ee7b07938dd59311f47fe5b385bcc27bfa4878f8 100644 (file)
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
         */
        ufp_exc->fpexc = hwstate->fpexc;
        ufp_exc->fpinst = hwstate->fpinst;
-       ufp_exc->fpinst2 = ufp_exc->fpinst2;
+       ufp_exc->fpinst2 = hwstate->fpinst2;
 
        /* Ensure that VFP is disabled. */
        vfp_flush_hwstate(thread);
index 8253a1a9e9857112f43c24d85c5c411e653376dd..fef7351e9f677da62cd0c50e8c424a3590dd7b51 100644 (file)
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
index b5f2273caca4ded1e6bc0cfe3a5e52b97a3fd854..a79c8d369e0b48c4ddb3448bf4d2498acb676504 100644 (file)
                        clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x35>, <&dmac1 0x34>,
                               <&dmac2 0x35>, <&dmac2 0x34>;
-                       dma-names = "tx", "rx";
+                       dma-names = "tx", "rx", "tx", "rx";
                        power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
                        resets = <&cpg 518>;
                        status = "disabled";
index fe2e2c051cc93fc0668a3d3e59a3b13432ef4766..5a7012be0d6ad953198c035df5626f2ff8ce0fe9 100644 (file)
@@ -15,7 +15,7 @@
 
        aliases {
                serial0 = &scif0;
-               ethernet0 = &avb;
+               ethernet0 = &gether;
        };
 
        chosen {
        };
 };
 
-&avb {
-       pinctrl-0 = <&avb_pins>;
-       pinctrl-names = "default";
-
-       phy-mode = "rgmii-id";
-       phy-handle = <&phy0>;
-       renesas,no-ether-link;
-       status = "okay";
-
-       phy0: ethernet-phy@0 {
-               rxc-skew-ps = <1500>;
-               reg = <0>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
-       };
-};
-
 &canfd {
        pinctrl-0 = <&canfd0_pins>;
        pinctrl-names = "default";
        clock-frequency = <32768>;
 };
 
+&gether {
+       pinctrl-0 = <&gether_pins>;
+       pinctrl-names = "default";
+
+       phy-mode = "rgmii-id";
+       phy-handle = <&phy0>;
+       renesas,no-ether-link;
+       status = "okay";
+
+       phy0: ethernet-phy@0 {
+               rxc-skew-ps = <1500>;
+               reg = <0>;
+               interrupt-parent = <&gpio4>;
+               interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+       };
+};
+
 &i2c0 {
        pinctrl-0 = <&i2c0_pins>;
        pinctrl-names = "default";
 };
 
 &pfc {
-       avb_pins: avb {
-               groups = "avb_mdio", "avb_rgmii";
-               function = "avb";
-       };
-
        canfd0_pins: canfd0 {
                groups = "canfd0_data_a";
                function = "canfd0";
        };
 
+       gether_pins: gether {
+               groups = "gether_mdio_a", "gether_rgmii",
+                        "gether_txcrefclk", "gether_txcrefclk_mega";
+               function = "gether";
+       };
+
        i2c0_pins: i2c0 {
                groups = "i2c0";
                function = "i2c0";
index 3e2091708b8e51f04b90e8d6b14c586dd54afeab..6b0d4dff50125e49522212cb7e6db1a778da539d 100644 (file)
 #define KERNEL_DS      UL(-1)
 #define USER_DS                (TASK_SIZE_64 - 1)
 
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN   0
+
 #ifndef __ASSEMBLY__
 #ifdef __KERNEL__
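Editor's note: network drivers consume NET_IP_ALIGN when carving out skb headroom, so defining it to 0 simply turns that reservation into a no-op on arm64. The usual RX allocation pattern looks like this (illustrative driver fragment, not code from this series):

    /* With NET_IP_ALIGN == 0 the reserve below vanishes and the buffer
     * keeps its natural, DMA-friendly alignment. */
    skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
    if (skb)
            skb_reserve(skb, NET_IP_ALIGN);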
 
index 953e316521fcaa34fcbe26a9ca8ca7de6b9f51e9..f4fc1e0544b73c5c3785ee35a027ec6cb60623dd 100644 (file)
@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
        arm64_memblock_init();
 
        paging_init();
+       efi_apply_persistent_mem_reservations();
 
        acpi_table_upgrade();
 
index 9d9582cac6c40cad483d431682a178c67c445b45..9b432d9fcada8dac8e7b1041437387f29785b2af 100644 (file)
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
        dma_contiguous_reserve(arm64_dma_phys_limit);
-
-       memblock_allow_resize();
 }
 
 void __init bootmem_init(void)
index 394b8d554def4c3372425ed5088ee1116ef9898e..d1d6601b385d9214ceadd17ba51057ff4e023177 100644 (file)
@@ -659,6 +659,8 @@ void __init paging_init(void)
 
        memblock_free(__pa_symbol(init_pg_dir),
                      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+       memblock_allow_resize();
 }
 
 /*
index 6181e4134483c26aa1a34d55e4b316ddad98f5f5..fe3ddd73a0ccb9e4fec24425164cc8c6c7f477bc 100644 (file)
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE   512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   1024
 #else
index f64ebb9c9a413535c105e3235eb50469d51b5697..e14b6621c933e47e1f87db0114f895b39f5450ef 100644 (file)
@@ -63,7 +63,7 @@ extern int mem_init_done;
 
 #include <asm-generic/4level-fixup.h>
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
index 75108ec669ebc881c6949962ef61f6368c4a814a..6c79e8a16a2681f01cf4ffb0a702a8414499bf7b 100644 (file)
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
 void (*cvmx_override_ipd_port_setup) (int ipd_port);
 
 /* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
 
 /**
  * Return the number of interfaces the chip has. Each interface
index e6c9485cadcffc7e0ecba01326ca3b777363edb4..cb38461391cb78c714535d2536b5cb4eed1bddad 100644 (file)
@@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size,
        void *ret;
 
        ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-       if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+       if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = (void *)UNCAC_ADDR(ret);
        }
index d3e19a55cf530046795f7c2836fbc13dc3b823fb..9f52db930c004ecc5c6de013721e06d7b4bf52a3 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef _ASMNDS32_PGTABLE_H
 #define _ASMNDS32_PGTABLE_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #include <asm-generic/4level-fixup.h>
 #include <asm-generic/sizes.h>
 
index b941ac7d4e70b35181351565136a9c25e7ee66f0..c7bb74e22436079de3d9f6153e98fe47cf8a9df4 100644 (file)
@@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD   (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define BITS_PER_PMD   0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
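Editor's note: the switch from an empty definition to an explicit 1 (repeated above for arm, m68k, microblaze and nds32) presumably allows the folded-level markers to appear in preprocessor arithmetic, not only in defined() tests:

    /* Both forms now work; the second was a syntax error while the
     * macro expanded to nothing (illustrative): */
    #ifdef __PAGETABLE_PMD_FOLDED
    /* ... */
    #endif

    #if defined(__PAGETABLE_PMD_FOLDED) && __PAGETABLE_PMD_FOLDED
    /* ... */
    #endif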
index 16aec9ba2580a6dd3b3b9bfe03d29099815d2412..8a63515f03bfe3931930d094a479060815832fe6 100644 (file)
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
        volatile unsigned int *a;
 
        a = __ldcw_align(x);
-       /* Release with ordered store. */
-       __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+       mb();
+       *a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
index 9505c317818df77cb1e67ea39fa3b43110d32d16..a9bc90dc4ae75e4e0489a297ef64c645a9cc7557 100644 (file)
@@ -640,7 +640,8 @@ cas_action:
        sub,<>  %r28, %r25, %r0
 2:     stw     %r24, 0(%r26)
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
 3:             
        /* Error occurred on load or store */
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
 #endif
@@ -855,7 +857,8 @@ cas2_action:
 
 cas2_end:
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
        /* Enable interrupts */
        ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
 22:
        /* Error occurred on load or store */
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
        ssm     PSW_SM_I, %r0
        ldo     1(%r0),%r28
        b       lws_exit
index 3ef40b703c4ab86e7daf3982ae4781de8988b32f..e746becd9d6ff29c65ab0109fb82dd945a046f6d 100644 (file)
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest addresses in the kernel virtual space are:
  *
- * The kernel virtual space is thus:
+ *  d0003fffffffffff   # with Hash MMU
+ *  c00fffffffffffff   # with Radix MMU
  *
- *  0xD000000000000000         : vmalloc
- *  0xD000080000000000         : PCI PHB IO space
- *  0xD000080080000000         : ioremap
- *  0xD0000fffffffffff         : end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */
 
 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK  0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT 48
+#define PCI_IO_IND_TOKEN_SHIFT 52
+#define PCI_IO_IND_TOKEN_MASK  (0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr)                                             \
        ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr)                                       \
index 6093bc8f74e518bf225c014c25521c8a515ba013..a6e9e314c7077044c0bb58590c95dddce4be8ed1 100644 (file)
                                        __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)   stringify_in_c(.long PPC_INST_SLBFEE | \
                                        __PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE |  \
+                                              ___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)                stringify_in_c(.long PPC_INST_ICBT | \
                                       __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
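Editor's note: the new __PPC_SLBFEE_DOT variant takes raw operand numbers and emits the instruction as a .long, so inline-asm placeholders can be used and the file still assembles with toolchains that lack the slbfee. mnemonic (that is the apparent motivation; the caller also gates it on CPU_FTR_ARCH_206 below). Its use in arch/powerpc/mm/slb.c, shown later in this diff, is:

    /* From assert_slb_presence(): emit slbfee. via its raw opcode. */
    asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");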
index f73886a1a7f51714da637c0f9c81a8dfd1107b7f..0b8a735b6d85f08512143b539c5ee5329598c48c 100644 (file)
@@ -54,6 +54,7 @@ struct pt_regs
 
 #ifdef CONFIG_PPC64
        unsigned long ppr;
+       unsigned long __pad;    /* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
index 2a51e4cc8246d35d18d8ddd54b258af02dda47c4..236c1151a3a77057013313ed5da588673f5f3419 100644 (file)
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
        unsigned long pa;
 
+       BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
        pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
                                        early_cpu_to_node(cpu), MEMBLOCK_NONE);
        if (!pa) {
index 491b0f715d6bc2c345850645f2dbcd4700f6f182..ea1d7c80831900c4403443b8d836cd462998bf22 100644 (file)
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
 
 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
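Editor's note: the same transformation is applied to trace_booke.h, trace_hv.h and trace_pr.h below: TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE are moved into the unprotected tail and reset with #undef, so a trace header included earlier in the same translation unit cannot leak its values into this one. A skeleton of the resulting convention (hedged sketch; 'subsys' is a placeholder name):

    #if !defined(_TRACE_SUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_SUBSYS_H

    #include <linux/tracepoint.h>

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM subsys

    /* TRACE_EVENT() definitions live here */

    #endif /* _TRACE_SUBSYS_H */

    /* This part must be outside protection */
    #undef TRACE_INCLUDE_PATH
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_PATH .
    #define TRACE_INCLUDE_FILE trace_subsys
    #include <trace/define_trace.h>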
index ac640e81fdc5f43709858ad8b3dd5ec2eee58a8f..3837842986aa46ee4ac80f4759d1051d9221c87c 100644 (file)
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
 
 #define kvm_trace_symbol_exit \
        {0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
index bcfe8a987f6a977e65f2e9c7a02962a7099c2a66..8a1e3b0047f190e53a64dfe57c9c88f9ac11d617 100644 (file)
@@ -9,8 +9,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
 
 #define kvm_trace_symbol_hcall \
        {H_REMOVE,                      "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
index 2f9a8829552b946ee8a308a2e069ab6a5c9bb1ba..46a46d328fbf2237dd203d3c33d54dbe0db129b1 100644 (file)
@@ -8,8 +8,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
 
 TRACE_EVENT(kvm_book3s_reenter,
        TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
index 3a048e98a13231b6ce8ec239d91e98fe1ed2d3a9..ce28ae5ca08033ff36ee157e9b83be3a61d44f52 100644 (file)
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
 
        switch (rc) {
        case H_FUNCTION:
-               printk(KERN_INFO
+               printk_once(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
index c3fdf2969d9faec5cacac62dfd9416011f9b17eb..bc3914d54e26ef8c400c65c92b8c359c171f8207 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
        return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
        unsigned long tmp;
 
        WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-       asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-       WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-       unsigned long tmp;
+       if (!cpu_has_feature(CPU_FTR_ARCH_206))
+               return;
 
-       WARN_ON_ONCE(mfmsr() & MSR_EE);
+       asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
-       asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-       WARN_ON(tmp != 0);
+       WARN_ON(present == (tmp == 0));
 #endif
 }
 
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
         */
        slb_shadow_update(ea, ssize, flags, index);
 
-       assert_slb_notexists(ea);
+       assert_slb_presence(false, ea);
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
                       "r" (be64_to_cpu(p->save_area[index].esid)));
        }
 
-       assert_slb_exists(local_paca->kstack);
+       assert_slb_presence(true, local_paca->kstack);
 }
 
 /*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
                     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
                        "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
                     : "memory");
-       assert_slb_exists(get_paca()->kstack);
+       assert_slb_presence(true, get_paca()->kstack);
 
        get_paca()->slb_cache_ptr = 0;
 
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                                ea = (unsigned long)
                                        get_paca()->slb_cache[i] << SID_SHIFT;
                                /*
-                                * Could assert_slb_exists here, but hypervisor
-                                * or machine check could have come in and
-                                * removed the entry at this point.
+                                * Could assert_slb_presence(true) here, but
+                                * hypervisor or machine check could have come
+                                * in and removed the entry at this point.
                                 */
 
                                slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
         * User preloads should add isync afterwards in case the kernel
         * accesses user memory before it returns to userspace with rfid.
         */
-       assert_slb_notexists(ea);
+       assert_slb_presence(false, ea);
        asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
        barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
                        return -EFAULT;
 
                if (ea < H_VMALLOC_END)
-                       flags = get_paca()->vmalloc_sllp;
+                       flags = local_paca->vmalloc_sllp;
                else
                        flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
        } else {
index 6f60e09319223015f5ebd36e3aa1fd9f66078520..75b9352529818899e99a978a4d85beeae535db90 100644 (file)
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);
 
-#define NPU_DMA_OP_UNSUPPORTED()                                       \
-       dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-               __func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag,
-                          unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle,
-                        unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-                                  unsigned long offset, size_t size,
-                                  enum dma_data_direction direction,
-                                  unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-                         int nelems, enum dma_data_direction direction,
-                         unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-       .map_page               = dma_npu_map_page,
-       .map_sg                 = dma_npu_map_sg,
-       .alloc                  = dma_npu_alloc,
-       .free                   = dma_npu_free,
-       .dma_supported          = dma_npu_dma_supported,
-       .get_required_mask      = dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE associated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
        rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
 
        /*
-        * We don't initialise npu_pe->tce32_table as we always use
-        * dma_npu_ops which are nops.
+        * NVLink devices use the same TCE table configuration as
+        * their parent device so drivers shouldn't be doing DMA
+        * operations directly on these devices.
         */
-       set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+       set_dma_ops(&npe->pdev->dev, NULL);
 }
 
 /*
index d10146197533affd63c3e7392ccd73a4d7ba2e27..4af153a182b071fcd4853a8853fb94fb1333312b 100644 (file)
@@ -77,4 +77,8 @@ core-y += arch/riscv/kernel/ arch/riscv/mm/
 
 libs-y += arch/riscv/lib/
 
+PHONY += vdso_install
+vdso_install:
+       $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
 all: vmlinux
index 07fa9ea75fea1f1c72caaa6f246c28df86b6a532..ef4f15df9adf03c091bf621f900413680a2829f2 100644 (file)
@@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_PRINTK_TIME=y
 # CONFIG_RCU_TRACE is not set
index 2c5df945d43c9abfdfd197a61d8c92ce20e48133..bbe1862e8f80cd404164f03a485a89f253f9fa45 100644 (file)
@@ -56,8 +56,8 @@ struct pt_regs {
        unsigned long sstatus;
        unsigned long sbadaddr;
        unsigned long scause;
-        /* a0 value before the syscall */
-        unsigned long orig_a0;
+       /* a0 value before the syscall */
+       unsigned long orig_a0;
 };
 
 #ifdef CONFIG_64BIT
index 3303ed2cd4193f82c51730a992d6c875b361ff80..7dd308129b40f1862ab04dc1e12c790bf7c111fe 100644 (file)
@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
        if (v != (u32)v) {
                pr_err("%s: value %016llx out of range for 32-bit field\n",
-                      me->name, v);
+                      me->name, (long long)v);
                return -EINVAL;
        }
        *location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
        if (offset != (s32)offset) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
        if (IS_ENABLED(CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
        } else {
                pr_err(
                  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                } else {
                        pr_err(
                          "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                         me->name, v, location);
+                         me->name, (long long)v, location);
                        return -EINVAL;
                }
        }
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
        if (offset != fill_v) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
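Editor's note: all of the module.c hunks apply one fix. Elf_Addr is only 32 bits wide on riscv32, so passing it straight to a %016llx conversion is undefined behaviour; the explicit cast widens the argument to match the format string. The idiom in isolation:

    /* Print a type whose width varies between configs: widen it
     * explicitly so it always matches the %llx specifier. */
    pr_err("%s: value %016llx out of range\n", me->name, (long long)v);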
 
index 5739bd05d289e5034b5d9faee2baebb1be1251ed..4e2e600f7d5384074fff062628cbb04f9113f7ab 100644 (file)
@@ -3,6 +3,6 @@ lib-y   += memcpy.o
 lib-y  += memset.o
 lib-y  += uaccess.o
 
-lib-(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_64BIT) += tishift.o
 
 lib-$(CONFIG_32BIT) += udivdi3.o
index 0b33577932c3bd9c552c62cfe473979987c97313..e21053e5e0da2a06c3ba78e9967e55837ecaddc0 100644 (file)
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
 UTS_MACHINE    := s390x
-STACK_SIZE     := $(if $(CONFIG_KASAN),32768,16384)
+STACK_SIZE     := $(if $(CONFIG_KASAN),65536,16384)
 CHECKFLAGS     += -D__s390__ -D__s390x__
 
 export LD_BFD
index 593039620487a6cdad8e076272b8e97cacff0153..b1bdd15e3429f39d50b0c8e73896c5539a4cfc5e 100644 (file)
@@ -22,10 +22,10 @@ OBJCOPYFLAGS :=
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
        $(call if_changed,ld)
 
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
 $(obj)/info.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
@@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA)  := .lzma
 suffix-$(CONFIG_KERNEL_LZO)  := .lzo
 suffix-$(CONFIG_KERNEL_XZ)  := .xz
 
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,xzkern)
 
 OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
index 259d1698ac50a468021e17a6a2fbe93526f520f2..c69cb04b7a5948e56535a145cb788de06fa4bed8 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_NUMA=y
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
@@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -461,6 +465,7 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
 CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
index 37fd60c20e22dec8cd8452baaf89135debccf735..32f539dc9c19240d589a5cb62fb51e0a30d9baf5 100644 (file)
@@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -82,9 +84,11 @@ CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
@@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -458,6 +462,7 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
 CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
index 7cb6a52f727dafc6c994423b0db21ccafec4993a..4d58a92b5d979f15e3469240c47a8e6f5fc4c189 100644 (file)
@@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_NR_CPUS=256
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
+CONFIG_BINFMT_MISC=m
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -60,9 +65,6 @@ CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AURORA is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
@@ -157,33 +161,6 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
@@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_CMAC=m
@@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
-CONFIG_ZCRYPT_MULTIDEVNODES=y
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
@@ -247,4 +224,30 @@ CONFIG_CRC7=m
 # CONFIG_XZ_DEC_ARM is not set
 # CONFIG_XZ_DEC_ARMTHUMB is not set
 # CONFIG_XZ_DEC_SPARC is not set
-CONFIG_CMM=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_S390_PTDUMP=y
index dbd689d556ce5dd9368392a1e0676c18163acc3c..ccbb53e2202404b85aae86e883d3e64405d2d305 100644 (file)
@@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk,
                mm->context.asce_limit = STACK_TOP_MAX;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
-               /* pgd_alloc() did not account this pud */
-               mm_inc_nr_puds(mm);
                break;
        case -PAGE_SIZE:
                /* forked 5-level task, set new asce with new_mm->pgd */
@@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk,
                /* forked 2-level compat task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
-               /* pgd_alloc() did not account this pmd */
-               mm_inc_nr_pmds(mm);
-               mm_inc_nr_puds(mm);
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
index f0f9bcf94c03749b0f0030d9de5765cff1597d37..5ee733720a5716b2308210d497f9c8ab73485cfa 100644 (file)
@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-       if (mm->context.asce_limit <= _REGION3_SIZE)
+       if (mm_pmd_folded(mm))
                return _SEGMENT_ENTRY_EMPTY;
-       if (mm->context.asce_limit <= _REGION2_SIZE)
+       if (mm_pud_folded(mm))
                return _REGION3_ENTRY_EMPTY;
-       if (mm->context.asce_limit <= _REGION1_SIZE)
+       if (mm_p4d_folded(mm))
                return _REGION2_ENTRY_EMPTY;
        return _REGION1_ENTRY_EMPTY;
 }
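Editor's note: pgd_entry_type() now keys off the mm_*_folded() helpers introduced in pgtable.h just below; the same predicates let common code skip accounting for page-table levels a task does not really have, which is why the manual mm_inc_nr_puds()/mm_inc_nr_pmds() corrections could be dropped from init_new_context() above. The generic side is expected to check the predicate along these lines (hedged sketch of the include/linux/mm.h helper):

    /* Sketch: only account a pud page if the pud level is real
     * for this mm. */
    static inline void mm_inc_nr_puds(struct mm_struct *mm)
    {
            if (mm_pud_folded(mm))
                    return;
            atomic_long_add(PTRS_PER_PUD * sizeof(pud_t),
                            &mm->pgtables_bytes);
    }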
index 411d435e7a7d2a5a8c650c812017d66f9738710a..063732414dfbb5076c431d13e694e239e878ebef 100644 (file)
@@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr)
                                   _REGION_ENTRY_PROTECT | \
                                   _REGION_ENTRY_NOEXEC)
 
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION1_SIZE;
+}
+#define mm_p4d_folded(mm) mm_p4d_folded(mm)
+
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION2_SIZE;
+}
+#define mm_pud_folded(mm) mm_pud_folded(mm)
+
+static inline bool mm_pmd_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION3_SIZE;
+}
+#define mm_pmd_folded(mm) mm_pmd_folded(mm)
+
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
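The self-referential defines above (#define mm_pud_folded(mm) mm_pud_folded(mm), and so on) are the usual kernel idiom for advertising an arch override: generic code tests the macro name with #ifndef and otherwise falls back to a compile-time default. A sketch of the presumed generic counterpart (an assumption, not part of this diff):

        #ifndef mm_pud_folded
        /* Assumed fallback: folded iff the level is folded at compile time. */
        #define mm_pud_folded(mm)       __is_defined(__PAGETABLE_PUD_FOLDED)
        #endif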
index 302795c47c06c299b732ed73de7b057a71b3805c..81038ab357ce955682b713f0c4241611ba5f931f 100644 (file)
@@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
-static __no_sanitize_address_or_inline unsigned short stap(void)
+static __no_kasan_or_inline unsigned short stap(void)
 {
        unsigned short cpu_address;
 
@@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw)
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
-static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
+static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
 {
        unsigned long addr;
        psw_t psw;
index 27248f42a03c4561a9e1481fbea205b3b866f928..ce4e17c9aad6fa266d306676df4e7cdc69eb7df0 100644 (file)
@@ -14,7 +14,7 @@
  * General size of kernel stacks
  */
 #ifdef CONFIG_KASAN
-#define THREAD_SIZE_ORDER 3
+#define THREAD_SIZE_ORDER 4
 #else
 #define THREAD_SIZE_ORDER 2
 #endif
index 457b7ba0fbb66de24fd82219e18a51ad2663221f..b31c779cf58176ad3bf91ee816053cbcf40b3476 100644 (file)
@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
+       if (mm_pmd_folded(tlb->mm))
                return;
        pgtable_pmd_page_dtor(virt_to_page(pmd));
        tlb_remove_table(tlb, pmd);
@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
+       if (mm_p4d_folded(tlb->mm))
                return;
        tlb_remove_table(tlb, p4d);
 }
@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
+       if (mm_pud_folded(tlb->mm))
                return;
        tlb_remove_table(tlb, pud);
 }
index 724fba4d09d2df3a35c372224ddc944c9def3ace..39191a0feed1cdedd692e68826b3b19db581cbbb 100644 (file)
@@ -236,10 +236,10 @@ ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        lghi    %r4,__TASK_stack
        lghi    %r1,__TASK_thread
-       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       llill   %r5,STACK_INIT
        stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
-       lgr     %r15,%r5
-       aghi    %r15,STACK_INIT                 # end of kernel stack of next
+       lg      %r15,0(%r4,%r3)                 # start of kernel stack of next
+       agr     %r15,%r5                        # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
index cc085e2d2ce9907690fbe0912dd301ab44e8171d..74091fd3101e9122943b9155572a1c46d2bf9858 100644 (file)
@@ -373,7 +373,7 @@ static int __hw_perf_event_init(struct perf_event *event)
                return -ENOENT;
 
        if (ev > PERF_CPUM_CF_MAX_CTR)
-               return -EINVAL;
+               return -ENOENT;
 
        /* Obtain the counter set to which the specified counter belongs */
        set = get_counter_set(ev);
index 7bf604ff50a1bd082024c85fb5d32e06cca9c4f8..bfabeb1889cc0cca5c6859cb36bbbeb15b662049 100644 (file)
@@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
-static struct attribute *cpumsf_pmu_events_attr[] = {
-       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-       NULL,
-       NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should also
+ * be turned off for diagnostic sampling.
+ *
+ * During initialization of the device driver, the authorization level
+ * for diagnostic sampling is checked and the attribute file for
+ * diagnostic sampling is installed if necessary.
+ *
+ * For now, install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add one more entry for the terminating NULL pointer.
+ */
+enum {
+       SF_CYCLES_BASIC_ATTR_IDX = 0,
+       SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+       SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+       [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
 };
 
 PMU_FORMAT_ATTR(event, "config:0-63");
@@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void)
 
        if (si.ad) {
                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
-               cpumsf_pmu_events_attr[1] =
+               /* Sampling of diagnostic data is authorized; install the
+                * event in the attribute list of the PMU device.
+                */
+               cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
                        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
        }
 
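The array above is sized SF_CYCLES_ATTR_MAX + 1 so that the unpopulated trailing slot doubles as the NULL terminator until the diagnostic event is installed. A minimal standalone sketch of the same conditional-install pattern, with illustrative names rather than kernel ones:

        enum { BASIC_IDX = 0, DIAG_IDX, ATTR_MAX };

        static const char *events[ATTR_MAX + 1] = {
                [BASIC_IDX] = "cycles_basic",   /* always present */
                /* [DIAG_IDX] stays NULL and terminates the list by default */
        };

        static void init_events(int diag_authorized)
        {
                if (diag_authorized)            /* determined at init time */
                        events[DIAG_IDX] = "cycles_basic_diag";
        }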
index eb8aebea3ea7bd7a6967136b6cb9aee3e25473aa..e76309fbbcb3b6e23af21350f98f2b555502b978 100644 (file)
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
        $(call if_changed,vdso32ld)
 
 # strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
        $(call if_changed_dep,vdso32as)
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
 
index a22b2cf86eec985d7f3bf32da11f5f0c220c28e7..f849ac61c5da02ee8b764bc3c01fc44c16137e04 100644 (file)
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
        $(call if_changed,vdso64ld)
 
 # strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
        $(call if_changed_dep,vdso64as)
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
 
index 21eb7407d51bac8e71f3743defba1f7de5291e3d..8429ab07971575394622444ea6be40eb85b37f62 100644 (file)
@@ -154,14 +154,14 @@ SECTIONS
         * uncompressed image info used by the decompressor
         * it should match struct vmlinux_info
         */
-       .vmlinux.info 0 : {
+       .vmlinux.info 0 (INFO) : {
                QUAD(_stext)                                    /* default_lma */
                QUAD(startup_continue)                          /* entry */
                QUAD(__bss_start - _stext)                      /* image_size */
                QUAD(__bss_stop - __bss_start)                  /* bss_size */
                QUAD(__boot_data_start)                         /* bootdata_off */
                QUAD(__boot_data_end - __boot_data_start)       /* bootdata_size */
-       }
+       } :NONE
 
        /* Debugging sections.  */
        STABS_DEBUG
index 76d89ee8b428837fc6c32f962d0104787caa29a3..814f26520aa2c2439de4e10ce52bf0476c8f2661 100644 (file)
@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
                        mm->context.asce_limit = _REGION1_SIZE;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+                       mm_inc_nr_puds(mm);
                } else {
                        crst_table_init(table, _REGION1_ENTRY_EMPTY);
                        pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
index ae0d9e889534cd880f750845fb58d919080e9325..d31bde0870d894bdc2cd3a3006d966924d1d5c1c 100644 (file)
@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
 {
        return mode->distance ? mode->distance(a, b) : 0;
 }
+EXPORT_SYMBOL(__node_distance);
 
 int numa_debug_enabled;
 
index 74c002ddc0ce74868286b77f43dfa6885e6c3e70..28c40624bcb6f0e9b15030037d6f199b46c5fa0f 100644 (file)
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
                io_req->fds[0] = dev->cow.fd;
        else
                io_req->fds[0] = dev->fd;
+       io_req->error = 0;
 
        if (req_op(req) == REQ_OP_FLUSH) {
                io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
                io_req->cow_offset = -1;
                io_req->offset = off;
                io_req->length = bvec->bv_len;
-               io_req->error = 0;
                io_req->sector_mask = 0;
-
                io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
                io_req->offsets[0] = 0;
                io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
 {
+       struct ubd *ubd_dev = hctx->queue->queuedata;
        struct request *req = bd->rq;
        int ret = 0;
 
        blk_mq_start_request(req);
 
+       spin_lock_irq(&ubd_dev->lock);
+
        if (req_op(req) == REQ_OP_FLUSH) {
                ret = ubd_queue_one_vec(hctx, req, 0, NULL);
        } else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 out:
-       if (ret < 0) {
+       spin_unlock_irq(&ubd_dev->lock);
+
+       if (ret < 0)
                blk_mq_requeue_request(req, true);
-       }
+
        return BLK_STS_OK;
 }
 
index ba7e3464ee9235fe43f0edd66034d670b2fc4ffd..9d734f3c8234d4bbdd44bb06518a768a6b9b7d02 100644 (file)
@@ -525,7 +525,6 @@ config X86_VSMP
        bool "ScaleMP vSMP"
        select HYPERVISOR_GUEST
        select PARAVIRT
-       select PARAVIRT_XXL
        depends on X86_64 && PCI
        depends on X86_EXTENDED_PLATFORM
        depends on SMP
index 5b562e4640099086493bc0fa6d46da88a0780f09..88398fdf81291ff7c58aef6f705ddf0910ecb59d 100644 (file)
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
 KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
 endif
 
-# Speed up the build
-KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
 KBUILD_CFLAGS += -Wno-sign-compare
 #
@@ -239,7 +237,7 @@ archheaders:
 archmacros:
        $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
 
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
 export ASM_MACRO_FLAGS
 KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
 
index e17ab885b1e928d17a671eb96f12cf21905bdb93..cb46d602a6b8bd17eb458f84778019b56b15a93c 100644 (file)
@@ -129,8 +129,15 @@ struct intel_uncore_box {
        struct intel_uncore_extra_reg shared_regs[0];
 };
 
-#define UNCORE_BOX_FLAG_INITIATED      0
-#define UNCORE_BOX_FLAG_CTL_OFFS8      1 /* event config registers are 8-byte apart */
+/* CFL uncore 8th cbox MSRs */
+#define CFL_UNC_CBO_7_PERFEVTSEL0              0xf70
+#define CFL_UNC_CBO_7_PER_CTR0                 0xf76
+
+#define UNCORE_BOX_FLAG_INITIATED              0
+/* event config registers are 8-byte apart */
+#define UNCORE_BOX_FLAG_CTL_OFFS8              1
+/* CFL 8th CBOX has different MSR space */
+#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS     2
 
 struct uncore_event_desc {
        struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
 static inline
 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 {
-       return box->pmu->type->event_ctl +
-               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-               uncore_msr_box_offset(box);
+       if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+               return CFL_UNC_CBO_7_PERFEVTSEL0 +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+       } else {
+               return box->pmu->type->event_ctl +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                      uncore_msr_box_offset(box);
+       }
 }
 
 static inline
 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 {
-       return box->pmu->type->perf_ctr +
-               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-               uncore_msr_box_offset(box);
+       if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+               return CFL_UNC_CBO_7_PER_CTR0 +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+       } else {
+               return box->pmu->type->perf_ctr +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                      uncore_msr_box_offset(box);
+       }
 }
 
 static inline
index 8527c3e1038b78d868743274c35368ab318649ca..2593b0d7aeee6089413d980618ce07c35d84c101 100644 (file)
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
+#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC  0x590c
+#define PCI_DEVICE_ID_INTEL_KBL_U_IMC  0x5904
+#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
+#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
+#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
+#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
+#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
+#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
+#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
+#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC       0x3e0f
+#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC       0x3e1f
+#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC       0x3ec2
+#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC       0x3e30
+#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC       0x3e18
+#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC       0x3ec6
+#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC       0x3e31
+#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC       0x3e33
+#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC       0x3eca
+#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC       0x3e32
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }
+
+       /* The 8th CBOX has different MSR space */
+       if (box->pmu->pmu_idx == 7)
+               __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
 }
 
 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
@@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
 static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
-       .num_boxes      = 5,
+       .num_boxes      = 8,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
@@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
-
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* end: all zeroes */ },
 };
 
@@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
+       IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
+       IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
+       IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
+       IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
+       IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
+       IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
+       IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
+       IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
+       IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
+       IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
+       IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
+       IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
+       IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
+       IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
+       IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
+       IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
+       IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
+       IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
+       IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
        {  /* end marker */ }
 };
 
index 4da9b1c58d287bbdda427e31dd67a1653b519043..c1a812bd5a27d770da1076c5b22ca9dc7dd66762 100644 (file)
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
index 0d6271cce198dcd1ac0108ac9a4ea803a6e8b2dc..1d0a7778e16317cab0087c46a1c30a8754e3ec8e 100644 (file)
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
                                      : "cc");
        }
 #endif
-               return hv_status;
+       return hv_status;
 }
 
 /*
index cd0cf1c568b4cef2fcc5b16c4ebcf374ee9add0d..8f657286d599a9577dca86b46b8199c9c547a661 100644 (file)
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272).  The gap is to allow a space for a
- * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
  */
-#define __PAGE_OFFSET_BASE_L5  _AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4  _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5  _AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4  _AC(0xffff888000000000, UL)
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET           page_offset_base
index 04edd2d58211a78e3261993bd8d0e088e3b4c4ef..84bd9bdc1987faa634cd1daad7dbfe94d586a82b 100644 (file)
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM                 (1UL << MAX_PHYSMEM_BITS)
 
-#define LDT_PGD_ENTRY_L4       -3UL
-#define LDT_PGD_ENTRY_L5       -112UL
-#define LDT_PGD_ENTRY          (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY          -240UL
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR           (LDT_BASE_ADDR + PGDIR_SIZE)
 
index 87623c6b13db5c735bfe80d377f678f1a5f1b893..bd5ac6cc37db5f87c92cc3013138dffa8b2a0302 100644 (file)
 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-       u32 val = 0;
-
-       if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
-                            "I", _Q_PENDING_OFFSET))
-               val |= _Q_PENDING_VAL;
+       u32 val;
 
+       /*
+        * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+        * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
+        * statement expression, which GCC doesn't like.
+        */
+       val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+                              "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
 
        return val;
index 123e669bf363d375820ba3ab2ce981f01aa4329e..790ce08e41f20f4b16a9c085204ea877d6db5e5e 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 
-#include <linux/uaccess.h>
+#include <asm/extable.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
  */
 static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
 {
-       return __put_user(val, (unsigned long __user *)addr);
+       int ret = 0;
+
+       asm volatile("1: mov %[val], %[ptr]\n"
+                    "2:\n"
+                    ".section .fixup, \"ax\"\n"
+                    "3: sub $1, %[ret]\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : [ret] "+r" (ret), [ptr] "=m" (*addr)
+                    : [val] "r" (val));
+
+       return ret;
 }
 
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+                                     unsigned long *val)
 {
-       return __get_user(*val, (unsigned long __user *)addr);
+       int ret = 0;
+       unsigned long rval = ~0ul;
+
+       asm volatile("1: mov %[ptr], %[rval]\n"
+                    "2:\n"
+                    ".section .fixup, \"ax\"\n"
+                    "3: sub $1, %[ret]\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : [ret] "+r" (ret), [rval] "+r" (rval)
+                    : [ptr] "m" (*addr));
+       *val = rval;
+
+       return ret;
 }
 
 #ifdef CONFIG_XEN_PV
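Both helpers return 0 on success and go negative when the access faults (the fixup path subtracts 1 from ret); the read variant additionally leaves *val as ~0UL on failure. A hedged sketch of a caller; the function name and error convention below are assumptions:

        /* Hypothetical caller; xen_p2m_addr is the global p2m array. */
        static unsigned long read_p2m_entry(unsigned long pfn)
        {
                unsigned long mfn;

                if (xen_safe_read_ulong(xen_p2m_addr + pfn, &mfn))
                        return ~0UL;    /* faulted: report an invalid entry */
                return mfn;
        }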
index 8c66d2fc8f81dd1d2404f22e5a06ac696ee8fad9..36d2696c9563e88a8e354068d7e8a43d636371d3 100644 (file)
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
  * be somewhat complicated (e.g. segment offset would require an instruction
  * parser). So only support physical addresses up to page granularity for now.
  */
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
 {
        if (!(m->status & MCI_STATUS_ADDRV))
                return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
 
        return 1;
 }
+EXPORT_SYMBOL_GPL(mce_usable_address);
 
 bool mce_is_memory_error(struct mce *m)
 {
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
 }
 EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
 {
        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
                return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(mce_is_correctable);
 
 static bool cec_add_mce(struct mce *m)
 {
index 1c72f3819eb123d8fb7271a0c49cf94aceae5ee3..e81a2db42df7ba0d6fb28d9b0b2fcf0340bbd585 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kexec.h>
+#include <linux/i8253.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
        if (efi_enabled(EFI_BOOT))
                x86_platform.get_nmi_reason = hv_get_nmi_reason;
 
+       /*
+        * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+        * counter register during PIT shutdown restarts the PIT, so it
+        * continues to interrupt at 18.2 Hz. Setting
+        * i8253_clear_counter_on_shutdown to false tells pit_shutdown()
+        * not to zero the counter, so that the PIT really is shut down.
+        * Generation 2 VMs don't have a PIT; there, setting this value
+        * has no effect.
+        */
+       i8253_clear_counter_on_shutdown = false;
+
 #if IS_ENABLED(CONFIG_HYPERV)
        /*
         * Setup the hook to get control post apic initialization.
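For context, the i8253 clockevent shutdown path presumably ends up shaped roughly like the sketch below; the body is paraphrased from the flag's stated intent, not quoted from this diff:

        static int pit_shutdown(struct clock_event_device *evt)
        {
                raw_spin_lock(&i8253_lock);
                outb_p(0x30, PIT_MODE);         /* mode 0, stop counting */
                if (i8253_clear_counter_on_shutdown) {
                        outb_p(0, PIT_CH0);     /* zeroing the counter here is */
                        outb_p(0, PIT_CH0);     /* what restarts Hyper-V's PIT */
                }
                raw_spin_unlock(&i8253_lock);
                return 0;
        }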
index d9ab49bed8afce9fb2eef06d457709debcf073ba..0eda91f8eeacee4d2511e60383728d22789a948f 100644 (file)
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
 }
 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
 
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
 {
        unsigned long long ns;
 
index ab18e0884dc6fdfb6e403760921b3dc87d7ff592..6135ae8ce0364772f5cc72f73b4bb8f2ad3a8d9e 100644 (file)
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
- *
- * There is no corresponding unmap function.  Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
  */
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        unsigned long va;
        bool is_vmalloc;
        spinlock_t *ptl;
-       pgd_t *pgd;
-       int i;
+       int i, nr_pages;
 
        if (!static_cpu_has(X86_FEATURE_PTI))
                return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        /* Check if the current mappings are sane */
        sanity_check_ldt_mapping(mm);
 
-       /*
-        * Did we already have the top level entry allocated?  We can't
-        * use pgd_none() for this because it doens't do anything on
-        * 4-level page table kernels.
-        */
-       pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
        is_vmalloc = is_vmalloc_addr(ldt->entries);
 
-       for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+       nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+       for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        /* Propagate LDT mapping to the user page-table */
        map_ldt_struct_to_user(mm);
 
-       va = (unsigned long)ldt_slot_va(slot);
-       flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
        ldt->slot = slot;
        return 0;
 }
 
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+       unsigned long va;
+       int i, nr_pages;
+
+       if (!ldt)
+               return;
+
+       /* LDT map/unmap is only required for PTI */
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+       for (i = 0; i < nr_pages; i++) {
+               unsigned long offset = i << PAGE_SHIFT;
+               spinlock_t *ptl;
+               pte_t *ptep;
+
+               va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+               ptep = get_locked_pte(mm, va, &ptl);
+               pte_clear(mm, va, ptep);
+               pte_unmap_unlock(ptep, ptl);
+       }
+
+       va = (unsigned long)ldt_slot_va(ldt->slot);
+       flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
 #else /* !CONFIG_PAGE_TABLE_ISOLATION */
 
 static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
        return 0;
 }
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
 #endif /* CONFIG_PAGE_TABLE_ISOLATION */
 
 static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        }
 
        install_ldt(mm, new_ldt);
+       unmap_ldt_struct(mm, old_ldt);
        free_ldt_struct(old_ldt);
        error = 0;
 
index 1eae5af491c278367630f805c48d78cc32f29cbb..891a75dbc131323b70e9776998cc0b437b4fe033 100644 (file)
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
-       unsigned long flags = native_save_fl();
-
-       if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
-               flags &= ~X86_EFLAGS_IF;
-       return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
-       if (flags & X86_EFLAGS_IF)
-               flags &= ~X86_EFLAGS_AC;
-       else
-               flags |= X86_EFLAGS_AC;
-       native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
-       unsigned long flags = native_save_fl();
-
-       native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
-       unsigned long flags = native_save_fl();
-
-       native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
-                                 unsigned long addr, unsigned len)
-{
-       switch (type) {
-       case PARAVIRT_PATCH(irq.irq_enable):
-       case PARAVIRT_PATCH(irq.irq_disable):
-       case PARAVIRT_PATCH(irq.save_fl):
-       case PARAVIRT_PATCH(irq.restore_fl):
-               return paravirt_patch_default(type, ibuf, addr, len);
-       default:
-               return native_patch(type, ibuf, addr, len);
-       }
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
 {
        void __iomem *address;
        unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
        }
 #endif
 
-       if (cap & ctl & (1 << 4)) {
-               /* Setup irq ops and turn on vSMP  IRQ fastpath handling */
-               pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
-               pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
-               pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
-               pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
-               pv_ops.init.patch = vsmp_patch;
-               ctl &= ~(1 << 4);
-       }
        writel(ctl, address + 4);
        ctl = readl(address + 4);
        pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
        early_iounmap(address, 8);
 }
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
 static int is_vsmp = -1;
 
 static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
 {
        return 0;
 }
+static void __init set_vsmp_ctl(void)
+{
+}
 #endif
 
 static void __init vsmp_cap_cpus(void)
 {
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
        void __iomem *address;
        unsigned int cfg, topology, node_shift, maxcpus;
 
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
 
        vsmp_cap_cpus();
 
-       set_vsmp_pv_ops();
+       set_vsmp_ctl();
        return;
 }
index 0d7b3ae4960bb0cc424cdc853cdf2f35a73835ae..a5d7ed12533707f8714e066cd4be4c30f880988d 100644 (file)
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        init_top_pgt[0] = __pgd(0);
 
        /* Pre-constructed entries are in pfn, so convert to mfn */
-       /* L4[272] -> level3_ident_pgt  */
+       /* L4[273] -> level3_ident_pgt  */
        /* L4[511] -> level3_kernel_pgt */
        convert_pfn_mfn(init_top_pgt);
 
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        addr[0] = (unsigned long)pgd;
        addr[1] = (unsigned long)l3;
        addr[2] = (unsigned long)l2;
-       /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-        * Both L4[272][0] and L4[511][510] have entries that point to the same
+       /* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
+        * both L4[273][0] and L4[511][510] have entries that point to the same
         * L2 (PMD) tables. Meaning that if you modify it in __va space
         * it will be also modified in the __ka space! (But if you just
         * modify the PMD table to point to other PTE's or none, then you
index b06731705529b1e4c339bc21db8de0a565acf6d8..055e37e43541ed17d11cf4a194085b7fc3a3192c 100644 (file)
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
        /*
         * The interface requires atomic updates on p2m elements.
-        * xen_safe_write_ulong() is using __put_user which does an atomic
-        * store via asm().
+        * xen_safe_write_ulong() is using an atomic store via asm().
         */
        if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
                return true;
index 441c8826216982a4fb9532b68d68a37cbb0d3e05..1c8a8816a402abd09bf199f57314daf61d9819d9 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-       unsigned long flags;
        int irq = __this_cpu_read(lock_kicker_irq);
+       atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1 || in_nmi())
                return;
 
-       /* Guard against reentry. */
-       local_irq_save(flags);
+       /* Detect reentry. */
+       atomic_inc(nest_cnt);
 
-       /* If irq pending already clear it. */
-       if (xen_test_irq_pending(irq)) {
+       /* If an irq is already pending and this is not a nested call, clear it. */
+       if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
                xen_clear_irq_pending(irq);
        } else if (READ_ONCE(*byte) == val) {
                /* Block until irq becomes pending (or a spurious wakeup) */
                xen_poll_irq(irq);
        }
 
-       local_irq_restore(flags);
+       atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
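Only the outermost invocation (nest count 1) may consume the pending kicker interrupt; a nested call, for example from an interrupt arriving inside the outer wait, falls through to polling instead. A stripped-down sketch of the per-cpu nesting pattern:

        static DEFINE_PER_CPU(atomic_t, nest_cnt);

        static void guarded_wait(void)
        {
                atomic_t *cnt = this_cpu_ptr(&nest_cnt);

                atomic_inc(cnt);
                if (atomic_read(cnt) == 1) {
                        /* outermost level: safe to consume the one-shot event */
                }
                atomic_dec(cnt);
        }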
index be9bfd9aa865beb554b2b6e7ce92010cf276b927..34a23016dd1442f5c95d445f13276d97c772072d 100644 (file)
 # error Linux requires the Xtensa Windowed Registers Option.
 #endif
 
-#define ARCH_SLAB_MINALIGN     XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
 
 /*
  * User space process size: 1 GB.
index 2f76118ecf6230ff01fe0e43221269da7b208f46..9053a5622d2c3435faefe4950f953b7923e8332f 100644 (file)
@@ -88,9 +88,12 @@ _SetupMMU:
        initialize_mmu
 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
        rsr     a2, excsave1
-       movi    a3, 0x08000000
+       movi    a3, XCHAL_KSEG_PADDR
+       bltu    a2, a3, 1f
+       sub     a2, a2, a3
+       movi    a3, XCHAL_KSEG_SIZE
        bgeu    a2, a3, 1f
-       movi    a3, 0xd0000000
+       movi    a3, XCHAL_KSEG_CACHED_VADDR
        add     a2, a2, a3
        wsr     a2, excsave1
 1:
index d5368a4455613452972e0b8b06ea3e1ceacb335e..4f4d9884443b63a8f002ddd754ea467f9a0e4c16 100644 (file)
@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        if (bio_flagged(bio_src, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
+       bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;
@@ -1260,6 +1261,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                if (ret)
                        goto cleanup;
        } else {
+               zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }
 
index ce12515f9b9b9930da4515ed8f70a4cf5f5b946f..deb56932f8c46e9cb0fe0950000b8da1922addfc 100644 (file)
@@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
         * dispatch may still be in-progress since we dispatch requests
         * from more than one contexts.
         *
-        * No need to quiesce queue if it isn't initialized yet since
-        * blk_freeze_queue() should be enough for cases of passthrough
-        * request.
+        * We rely on the driver to deal with the race in case queue
+        * initialization isn't done yet.
         */
        if (q->mq_ops && blk_queue_init_done(q))
                blk_mq_quiesce_queue(q);
index 76f867ea9a9b92fdfa921843a6a0ffe2c4297087..5f2c429d437847447bc329a00c11a91f58a28edf 100644 (file)
@@ -51,16 +51,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       while (nr_sects) {
-               unsigned int req_sects = nr_sects;
-               sector_t end_sect;
+       if (!nr_sects)
+               return -EINVAL;
 
-               if (!req_sects)
-                       goto fail;
-               if (req_sects > UINT_MAX >> 9)
-                       req_sects = UINT_MAX >> 9;
+       while (nr_sects) {
+               sector_t req_sects = min_t(sector_t, nr_sects,
+                               bio_allowed_max_sectors(q));
 
-               end_sect = sector + req_sects;
+               WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 
                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
@@ -68,8 +66,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                bio_set_op_attrs(bio, op, 0);
 
                bio->bi_iter.bi_size = req_sects << 9;
+               sector += req_sects;
                nr_sects -= req_sects;
-               sector = end_sect;
 
                /*
                 * We can loop for a long time in here, if someone does
@@ -82,14 +80,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        *biop = bio;
        return 0;
-
-fail:
-       if (bio) {
-               submit_bio_wait(bio);
-               bio_put(bio);
-       }
-       *biop = NULL;
-       return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
 
@@ -161,7 +151,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                return -EOPNOTSUPP;
 
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
-       max_write_same_sectors = UINT_MAX >> 9;
+       max_write_same_sectors = bio_allowed_max_sectors(q);
 
        while (nr_sects) {
                bio = blk_next_bio(bio, 1, gfp_mask);
index 6b5ad275ed565de274746b1ef473f46a3a20a621..e7696c47489ad1f8caa11a64ff8fdb2b6d0f41a2 100644 (file)
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
-       if (pb.bv_offset)
+       if (pb.bv_offset & queue_virt_boundary(q))
                return true;
 
        /*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-       max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+       max_discard_sectors = min(q->limits.max_discard_sectors,
+                       bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;
 
        if (unlikely(!max_discard_sectors)) {
index a1841b8ff12963a883047780762229f923989f01..0089fefdf771d7082ee05ca97504005090a26025 100644 (file)
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
 {
-       return offset ||
+       return (offset & queue_virt_boundary(q)) ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
@@ -395,6 +395,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
        return rq->__deadline & ~0x1UL;
 }
 
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int'; meanwhile it has to be aligned to the
+ * logical block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+       return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
 /*
  * Internal io_context interface
  */
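A worked example for the bio_allowed_max_sectors() helper above, assuming 4 KiB logical blocks:

        unsigned int max_bytes = round_down(UINT_MAX, 4096);  /* 0xfffff000 */
        unsigned int max_sects = max_bytes >> 9;              /* 8388600 sectors */
        /* max_sects << 9 == 0xfffff000 <= UINT_MAX, and stays 4 KiB aligned */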
index 36869afc258ccf6ea609e0e74db6cea56e6d2c34..559c55bda040e2da3d2ec1bc66dacb6e7f02b829 100644 (file)
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
                return NULL;
        bio->bi_disk            = bio_src->bi_disk;
        bio->bi_opf             = bio_src->bi_opf;
+       bio->bi_ioprio          = bio_src->bi_ioprio;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
index e41f6cc33fff49f2b35ad52504742ba79c70c3b8..784748dbb19f0c58482ad18c761c7de121d41928 100644 (file)
@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_cipher rcipher;
 
-       strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+       strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
        rcipher.blocksize = alg->cra_blocksize;
        rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_comp rcomp;
 
-       strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+       strncpy(rcomp.type, "compression", sizeof(rcomp.type));
        if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                    sizeof(struct crypto_report_comp), &rcomp))
                goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_acomp racomp;
 
-       strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+       strncpy(racomp.type, "acomp", sizeof(racomp.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
                    sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_akcipher rakcipher;
 
-       strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+       strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
                    sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_kpp rkpp;
 
-       strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+       strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
                    sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ nla_put_failure:
 static int crypto_report_one(struct crypto_alg *alg,
                             struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
-       strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-       strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+       strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+       strncpy(ualg->cru_driver_name, alg->cra_driver_name,
                sizeof(ualg->cru_driver_name));
-       strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+       strncpy(ualg->cru_module_name, module_name(alg->cra_module),
                sizeof(ualg->cru_module_name));
 
        ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
                struct crypto_report_larval rl;
 
-               strlcpy(rl.type, "larval", sizeof(rl.type));
+               strncpy(rl.type, "larval", sizeof(rl.type));
                if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
                            sizeof(struct crypto_report_larval), &rl))
                        goto nla_put_failure;
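Replacing strlcpy() with strncpy() looks like a downgrade at first glance; presumably the point is that these fixed-size report structs are copied to userspace verbatim via netlink, and strncpy() zero-fills the tail of the destination where strlcpy() leaves it uninitialized. A minimal illustration:

        char buf[16];

        strlcpy(buf, "abc", sizeof(buf));  /* buf[4..15] stay uninitialized */
        strncpy(buf, "abc", sizeof(buf));  /* buf[3..15] are all zero-filled */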
index 021ad06bbb628b5bc199ccded03c44429bae061c..1dfaa0ccd555b5bd3246822365114fa69c7e5ae1 100644 (file)
@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&raead, 0, sizeof(raead));
+
        strncpy(raead.type, "aead", sizeof(raead.type));
 
        v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rcipher, 0, sizeof(rcipher));
+
        strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
        v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rcomp, 0, sizeof(rcomp));
+
        strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
        v32 = atomic_read(&alg->compress_cnt);
        rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&racomp, 0, sizeof(racomp));
+
        strlcpy(racomp.type, "acomp", sizeof(racomp.type));
        v32 = atomic_read(&alg->compress_cnt);
        racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rakcipher, 0, sizeof(rakcipher));
+
        strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
        v32 = atomic_read(&alg->encrypt_cnt);
        rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
        struct crypto_stat rkpp;
        u32 v;
 
+       memset(&rkpp, 0, sizeof(rkpp));
+
        strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
        v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rhash, 0, sizeof(rhash));
+
        strncpy(rhash.type, "ahash", sizeof(rhash.type));
 
        v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rhash, 0, sizeof(rhash));
+
        strncpy(rhash.type, "shash", sizeof(rhash.type));
 
        v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rrng, 0, sizeof(rrng));
+
        strncpy(rrng.type, "rng", sizeof(rrng.type));
 
        v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
                                 struct crypto_user_alg *ualg,
                                 struct sk_buff *skb)
 {
+       memset(ualg, 0, sizeof(*ualg));
+
        strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
        strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
                sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
                struct crypto_stat rl;
 
+               memset(&rl, 0, sizeof(rl));
                strlcpy(rl.type, "larval", sizeof(rl.type));
                if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
                            sizeof(struct crypto_stat), &rl))
index ea7240be3001ba245c12d3214c11a7c7e6a8a1fd..78e8d037ae2b342d94ff837d6c9de82b6a4a1090 100644 (file)
@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
 
        ctx->cryptd_tfm = cryptd_tfm;
 
-       reqsize = sizeof(struct skcipher_request);
-       reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+       reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
+       reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
+       reqsize += sizeof(struct skcipher_request);
 
        crypto_skcipher_set_reqsize(tfm, reqsize);
 
index 8f3a444c6ea9233a2c0cd116e387a71c65d2360f..7cea769c37df55b50c55a7e8751a05057b5348cd 100644 (file)
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
 
 config XPOWER_PMIC_OPREGION
        bool "ACPI operation region support for XPower AXP288 PMIC"
-       depends on MFD_AXP20X_I2C && IOSF_MBI
+       depends on MFD_AXP20X_I2C && IOSF_MBI=y
        help
          This config adds ACPI operation region support for XPower AXP288 PMIC.
 
index f8c638f3c946d904fd0d9ea0aa3301bca2bafd14..14d9f5bea0151c39e706ebfb4ecddb1962fdcc33 100644 (file)
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
                return rc;
 
        if (ars_status_process_records(acpi_desc))
-               return -ENOMEM;
+               dev_err(acpi_desc->dev, "Failed to process ARS records\n");
 
-       return 0;
+       return rc;
 }
 
 static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd)
 {
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
-       struct nfit_spa *nfit_spa;
-       int rc = 0;
 
        if (nvdimm)
                return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
         * just needs guarantees that any ARS it initiates are not
         * interrupted by any intervening start requests from userspace.
         */
-       mutex_lock(&acpi_desc->init_mutex);
-       list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-               if (acpi_desc->scrub_spa
-                               || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
-                               || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
-                       rc = -EBUSY;
-                       break;
-               }
-       mutex_unlock(&acpi_desc->init_mutex);
+       if (work_busy(&acpi_desc->dwork.work))
+               return -EBUSY;
 
-       return rc;
+       return 0;
 }
 
 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
index e9626bf6ca2960a2398aeeefc0f4c9e814e60c1b..d6c1b10f6c2542a8cfbbac6dae31246cd35134f7 100644 (file)
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
        struct acpi_nfit_desc *acpi_desc;
        struct nfit_spa *nfit_spa;
 
-       /* We only care about memory errors */
-       if (!mce_is_memory_error(mce))
+       /* We only care about uncorrectable memory errors */
+       if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+               return NOTIFY_DONE;
+
+       /* Verify the address reported in the MCE is valid. */
+       if (!mce_usable_address(mce))
                return NOTIFY_DONE;
 
        /*
index 6e594644cb1d360dabbdf3a4b68851a45c5bfb0f..a7f5202a48152a42e6b2b7ab5835289d8cb6fcca 100644 (file)
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* These specific Samsung models/firmware-revs do not handle LPM well */
        { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
        { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
-       { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+       { "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
 
        /* devices that don't properly handle queued TRIM commands */
        { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
index 10ecb232245db8c617ee808966db432ece834358..4b1ff5bc256a3032191f090226ffb4c5d0286ae9 100644 (file)
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Renesas R-Car SATA driver
  *
  * Author: Vladimir Barinov <source@cogentembedded.com>
  * Copyright (C) 2013-2015 Cogent Embedded, Inc.
  * Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 
 #include <linux/kernel.h>
index a8cfa011c28483ef389ee161b5ca86af71eac13e..fb23578e9a416703648154b7371f05bbe3f5ceb8 100644 (file)
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
        bio.bi_end_io = floppy_rb0_cb;
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
+       init_completion(&cbdata.complete);
+
        submit_bio(&bio);
        process_fd_request();
 
-       init_completion(&cbdata.complete);
        wait_for_completion(&cbdata.complete);
 
        __free_page(page);
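
Moving init_completion() before submit_bio() matters because the bio's
completion callback may run as soon as the bio is submitted; signalling a
completion that has not been initialized yet is a use-before-init race. A
userspace sketch of the same ordering rule using pthreads (all names are
invented; this shows the shape of the fix, not the kernel code):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            int             done;
    };

    static struct completion cbdata;

    /* Plays the role of the bio end_io callback. */
    static void *io_worker(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&cbdata.lock);
            cbdata.done = 1;
            pthread_cond_signal(&cbdata.cond);
            pthread_mutex_unlock(&cbdata.lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            /* The init_completion() equivalent must run before the I/O
             * is submitted: the worker may signal immediately. */
            pthread_mutex_init(&cbdata.lock, NULL);
            pthread_cond_init(&cbdata.cond, NULL);
            cbdata.done = 0;

            pthread_create(&t, NULL, io_worker, NULL); /* submit_bio() */

            pthread_mutex_lock(&cbdata.lock);   /* wait_for_completion() */
            while (!cbdata.done)
                    pthread_cond_wait(&cbdata.cond, &cbdata.lock);
            pthread_mutex_unlock(&cbdata.lock);

            pthread_join(t, NULL);
            puts("done");
            return 0;
    }
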
index 56452cabce5b587cb7309f9ba24640bbf0ba05da..0ed4b200fa5855e10a142b6f6ce237901cf749ec 100644 (file)
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
                              GFP_KERNEL);
        if (!info->rinfo) {
                xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+               info->nr_rings = 0;
                return -ENOMEM;
        }
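
Zeroing nr_rings on the failure path keeps the ring count consistent with the
missing array: later teardown code iterates nr_rings entries of info->rinfo,
and a stale non-zero count over a NULL pointer would oops. A small standalone
illustration of the invariant (names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
            unsigned int nr_rings;
            int         *rinfo;
    };

    static int alloc_rings(struct dev *d, unsigned int n)
    {
            d->nr_rings = n;
            d->rinfo = calloc(n, sizeof(*d->rinfo));
            if (!d->rinfo) {
                    /* Keep count and array consistent: teardown walks
                     * nr_rings entries and must now see zero of them. */
                    d->nr_rings = 0;
                    return -1;
            }
            return 0;
    }

    static void free_rings(struct dev *d)
    {
            for (unsigned int i = 0; i < d->nr_rings; i++)
                    d->rinfo[i] = 0;    /* per-ring teardown goes here */
            free(d->rinfo);
            d->rinfo = NULL;
            d->nr_rings = 0;
    }

    int main(void)
    {
            struct dev d;

            if (alloc_rings(&d, 4) == 0) {
                    printf("allocated %u rings\n", d.nr_rings);
                    free_rings(&d);
            }
            return 0;
    }
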
 
index ef0ca9414f371bc3275b7afce529029e10b49f68..ff83e899df71fca602aadba443fdce73308eb9c5 100644 (file)
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
 {
        struct clk *clk = platform_get_drvdata(pdev);
 
+       of_clk_del_provider(pdev->dev.of_node);
        clk_unregister_fixed_factor(clk);
 
        return 0;
index c981159b02c0f09c604a78005f26103c75962e9c..792735d7e46ea0faf3299f710813df3f98cd3834 100644 (file)
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div2_div" },
                .num_parents = 1,
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div3_div" },
                .num_parents = 1,
+               /*
+                * FIXME:
+                * This clock, like fdiv2, is used by the SCPI FW and is
+                * required for the platform to operate correctly.
+                * Until the following conditions are met, we need this
+                * clock to be marked as critical:
+                * a) The SCPI generic driver claims and enables all the
+                *    clocks it needs
+                * b) CCF has a clock hand-off mechanism to make sure the
+                *    clock stays on until the proper driver comes along
+                */
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index 9309cfaaa464ebd5f3e7d26e174c3c8449e16208..4ada9668fd49c2596de2667aebccd841ee673bb5 100644 (file)
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div3_div" },
                .num_parents = 1,
+               /*
+                * FIXME:
+                * This clock, like fdiv2, is used by the SCPI FW and is
+                * required for the platform to operate correctly.
+                * Until the following conditions are met, we need this
+                * clock to be marked as critical:
+                * a) The SCPI generic driver claims and enables all the
+                *    clocks it needs
+                * b) CCF has a clock hand-off mechanism to make sure the
+                *    clock stays on until the proper driver comes along
+                */
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index e4ca6a45f31397324d4f79378b59036a38218641..ef1b267cb058a4a03f0ead86218ee165653fd737 100644 (file)
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
        .div = 1,
        .hw.init = &(struct clk_init_data){
                .name = "cxo",
-               .parent_names = (const char *[]){ "xo_board" },
+               .parent_names = (const char *[]){ "xo-board" },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
        },
index 9c38895542f4abb5bff8c487ff22701e008443a2..d4350bb10b83a26aa1c9a56555ff8a20e949148a 100644 (file)
 DEFINE_RAW_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform-specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
 #ifdef CONFIG_CLKSRC_I8253
 /*
  * Since the PIT overflows every tick, it's not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
        raw_spin_lock(&i8253_lock);
 
        outb_p(0x30, PIT_MODE);
-       outb_p(0, PIT_CH0);
-       outb_p(0, PIT_CH0);
+
+       if (i8253_clear_counter_on_shutdown) {
+               outb_p(0, PIT_CH0);
+               outb_p(0, PIT_CH0);
+       }
 
        raw_spin_unlock(&i8253_lock);
        return 0;
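
With the i8253_clear_counter_on_shutdown flag introduced above, affected
platforms can opt out of the counter clear before clockevents shut the PIT
down. A hedged sketch of what such platform code might look like (the hook
name is invented, and it assumes the flag is declared in <linux/i8253.h>
alongside i8253_lock):

    #include <linux/i8253.h>
    #include <linux/init.h>

    static void __init quirky_platform_init(void)
    {
            /*
             * On this hardware, writing 0 to PIT_CH0 restarts the PIT
             * instead of stopping it, so ask pit_shutdown() to skip
             * the counter clear.
             */
            i8253_clear_counter_on_shutdown = false;
    }
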
index 8cfee0ab804b43e2dc90e9f55b241a7aa17de363..d8c3595e90236e5f9d87ca9b5f55a7cbdb76ccdc 100644 (file)
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
        /* Ensure the arm clock divider is what we expect */
        ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
        if (ret) {
+               int ret1;
+
                dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-               regulator_set_voltage_tol(arm_reg, volt_old, 0);
+               ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+               if (ret1)
+                       dev_warn(cpu_dev,
+                                "failed to restore vddarm voltage: %d\n", ret1);
                return ret;
        }
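
The error path now checks the rollback as well: if restoring the old voltage
also fails, that deserves a warning of its own, but the original
clk_set_rate() error is still the one returned. A standalone sketch of the
"report the rollback failure, return the primary error" shape (all names
invented):

    #include <stdio.h>

    static int set_rate(int hz)      { return hz > 1000 ? -1 : 0; }
    static int restore_voltage(void) { return 0; }

    static int set_target(int hz)
    {
            int ret = set_rate(hz);

            if (ret) {
                    int ret1 = restore_voltage();

                    fprintf(stderr, "failed to set clock rate: %d\n", ret);
                    if (ret1)
                            fprintf(stderr,
                                    "failed to restore voltage: %d\n", ret1);
                    return ret;     /* the primary error wins */
            }
            return 0;
    }

    int main(void)
    {
            return set_target(2000) ? 1 : 0;
    }
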
 
index 073557f433eb1be630a7f64b8cfc9930771eff2a..3a407a3ef22b4c5a53046452c75c3784e0b77d9d 100644 (file)
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
 {
        int ret;
        struct cpuidle_driver *drv;
-       struct cpuidle_device *dev;
 
        drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
        if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
                goto out_kfree_drv;
        }
 
-       ret = cpuidle_register_driver(drv);
-       if (ret) {
-               if (ret != -EBUSY)
-                       pr_err("Failed to register cpuidle driver\n");
-               goto out_kfree_drv;
-       }
-
        /*
         * Call arch CPU operations in order to initialize
         * idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
        ret = arm_cpuidle_init(cpu);
 
        /*
-        * Skip the cpuidle device initialization if the reported
+        * Allow the initialization to continue for other CPUs if the reported
         * failure is a HW misconfiguration/breakage (-ENXIO).
         */
-       if (ret == -ENXIO)
-               return 0;
-
        if (ret) {
                pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-               goto out_unregister_drv;
-       }
-
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               ret = -ENOMEM;
-               goto out_unregister_drv;
+               ret = ret == -ENXIO ? 0 : ret;
+               goto out_kfree_drv;
        }
-       dev->cpu = cpu;
 
-       ret = cpuidle_register_device(dev);
-       if (ret) {
-               pr_err("Failed to register cpuidle device for CPU %d\n",
-                      cpu);
-               goto out_kfree_dev;
-       }
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto out_kfree_drv;
 
        return 0;
 
-out_kfree_dev:
-       kfree(dev);
-out_unregister_drv:
-       cpuidle_unregister_driver(drv);
 out_kfree_drv:
        kfree(drv);
        return ret;
@@ -178,9 +154,7 @@ out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
-               cpuidle_unregister_device(dev);
-               cpuidle_unregister_driver(drv);
-               kfree(dev);
+               cpuidle_unregister(drv);
                kfree(drv);
        }
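
The rewrite collapses the separate driver and device registrations into
cpuidle_register(), which registers the driver and allocates and registers a
per-CPU device in one call, leaving a single failure path. A hedged sketch of
the resulting per-CPU init, assuming a driver template named my_idle_driver:

    #include <linux/cpuidle.h>
    #include <linux/slab.h>

    static int __init my_idle_init_cpu(int cpu)
    {
            struct cpuidle_driver *drv;
            int ret;

            drv = kmemdup(&my_idle_driver, sizeof(*drv), GFP_KERNEL);
            if (!drv)
                    return -ENOMEM;

            drv->cpumask = (struct cpumask *)cpumask_of(cpu);

            /* Replaces cpuidle_register_driver() + kzalloc() +
             * cpuidle_register_device(); the matching teardown is a
             * single cpuidle_unregister(). */
            ret = cpuidle_register(drv, NULL);
            if (ret)
                    kfree(drv);
            return ret;
    }
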
 
index f7d6d690116ee8f32bada36c6b25520976c219c5..cdc4f9a171d986625352319d76ccf243e417410a 100644 (file)
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        int *splits_in_nents;
        int *splits_out_nents = NULL;
        struct sec_request_el *el, *temp;
+       bool split = skreq->src != skreq->dst;
 
        mutex_init(&sec_req->lock);
        sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        if (ret)
                goto err_free_split_sizes;
 
-       if (skreq->src != skreq->dst) {
+       if (split) {
                sec_req->len_out = sg_nents(skreq->dst);
                ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
                                           &splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
                                               split_sizes[i],
                                               skreq->src != skreq->dst,
                                               splits_in[i], splits_in_nents[i],
-                                              splits_out[i],
-                                              splits_out_nents[i], info);
+                                              split ? splits_out[i] : NULL,
+                                              split ? splits_out_nents[i] : 0,
+                                              info);
                if (IS_ERR(el)) {
                        ret = PTR_ERR(el);
                        goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
         * more refined but this is unlikely to happen so no need.
         */
 
-       /* Cleanup - all elements in pointer arrays have been coppied */
-       kfree(splits_in_nents);
-       kfree(splits_in);
-       kfree(splits_out_nents);
-       kfree(splits_out);
-       kfree(split_sizes);
-
        /* Grab a big lock for a long time to avoid concurrency issues */
        mutex_lock(&queue->queuelock);
 
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
             (!queue->havesoftqueue ||
              kfifo_avail(&queue->softqueue) > steps)) ||
            !list_empty(&ctx->backlog)) {
+               ret = -EBUSY;
                if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        list_add_tail(&sec_req->backlog_head, &ctx->backlog);
                        mutex_unlock(&queue->queuelock);
-                       return -EBUSY;
+                       goto out;
                }
 
-               ret = -EBUSY;
                mutex_unlock(&queue->queuelock);
                goto err_free_elements;
        }
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        if (ret)
                goto err_free_elements;
 
-       return -EINPROGRESS;
+       ret = -EINPROGRESS;
+out:
+       /* Cleanup - all elements in pointer arrays have been copied */
+       kfree(splits_in_nents);
+       kfree(splits_in);
+       kfree(splits_out_nents);
+       kfree(splits_out);
+       kfree(split_sizes);
+       return ret;
 
 err_free_elements:
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
                                 crypto_skcipher_ivsize(atfm),
                                 DMA_BIDIRECTIONAL);
 err_unmap_out_sg:
-       if (skreq->src != skreq->dst)
+       if (split)
                sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
                                    splits_out_nents, sec_req->len_out,
                                    info->dev);
index 388a929baf95d1e1107ab4b0ab20516dea3cfdf7..1a6a77df8a5e8aea45f3cbc2bac9c5d0883b0edb 100644 (file)
@@ -265,6 +265,10 @@ void __init efi_init(void)
                                    (params.mmap & ~PAGE_MASK)));
 
        init_screen_info();
+
+       /* ARM does not permit early mappings to persist across paging_init() */
+       if (IS_ENABLED(CONFIG_ARM))
+               efi_memmap_unmap();
 }
 
 static int __init register_gop_device(void)
index 922cfb813109a3c14a88a0bb054a09534e813ec9..a00934d263c519a9d476a57a5bb2388c6041b810 100644 (file)
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
 {
        u64 mapsize;
 
-       if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+       if (!efi_enabled(EFI_BOOT)) {
                pr_info("EFI services will not be available.\n");
                return 0;
        }
index 249eb70691b0f5e7567cf4fc3bbb8dda9df571cf..fad7c62cfc0e422f3b8459653a561f4dee770a6e 100644 (file)
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 
                early_memunmap(tbl, sizeof(*tbl));
        }
+       return 0;
+}
 
+int __init efi_apply_persistent_mem_reservations(void)
+{
        if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = efi.mem_reserve;
 
@@ -963,36 +967,43 @@ bool efi_is_table_address(unsigned long phys_addr)
 }
 
 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
 
 int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 {
-       struct linux_efi_memreserve *rsv, *parent;
+       struct linux_efi_memreserve *rsv;
 
-       if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+       if (!efi_memreserve_root)
                return -ENODEV;
 
-       rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+       rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
        if (!rsv)
                return -ENOMEM;
 
-       parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
-       if (!parent) {
-               kfree(rsv);
-               return -ENOMEM;
-       }
-
        rsv->base = addr;
        rsv->size = size;
 
        spin_lock(&efi_mem_reserve_persistent_lock);
-       rsv->next = parent->next;
-       parent->next = __pa(rsv);
+       rsv->next = efi_memreserve_root->next;
+       efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);
 
-       memunmap(parent);
+       return 0;
+}
 
+static int __init efi_memreserve_root_init(void)
+{
+       if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+               return -ENODEV;
+
+       efi_memreserve_root = memremap(efi.mem_reserve,
+                                      sizeof(*efi_memreserve_root),
+                                      MEMREMAP_WB);
+       if (!efi_memreserve_root)
+               return -ENOMEM;
        return 0;
 }
+early_initcall(efi_memreserve_root_init);
 
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
index 30ac0c975f8a1cc6bd544ba90991a4330ea05cd9..3d36142cf81208d408cab75e28edf957fa865c9e 100644 (file)
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
        efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
        efi_status_t status;
 
+       if (IS_ENABLED(CONFIG_ARM))
+               return;
+
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
                                (void **)&rsv);
        if (status != EFI_SUCCESS) {
index 8830fa601e45d9a1b1094419cd1ec66f41a25e49..0c0d2312f4a8ad27f6e852bc82d5f2b6c0124e64 100644 (file)
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                        return efi_status;
                }
        }
+
+       /* shrink the FDT back to its minimum size */
+       fdt_pack(fdt);
+
        return EFI_SUCCESS;
 
 fdt_set_fail:
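
fdt_pack() is the standard libfdt call for this: update_fdt() edits the blob
inside an oversized buffer, and packing rewrites the header's totalsize down
to the minimum before the blob is handed over. A minimal illustration against
libfdt (assuming fdt points at a writable blob opened with slack, for example
via fdt_open_into(); link with -lfdt):

    #include <libfdt.h>
    #include <stdio.h>

    static int shrink_blob(void *fdt)
    {
            int err;

            /* Drop the free space left over from editing. */
            err = fdt_pack(fdt);
            if (err)
                    fprintf(stderr, "fdt_pack: %s\n", fdt_strerror(err));
            return err;
    }
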
index fa2904fb841fe459a6562b9fea237c5213fa97be..38b686c67b177da4875b9174c0f50ca165c6b2d4 100644 (file)
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
 
 void __init efi_memmap_unmap(void)
 {
+       if (!efi_enabled(EFI_MEMMAP))
+               return;
+
        if (!efi.memmap.late) {
                unsigned long size;
 
index a19d845bdb06748907972b652415594e18a095f9..8903b9ccfc2b8da6cdc5341fef8619744d904c88 100644 (file)
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
        }                                                               \
                                                                        \
        init_completion(&efi_rts_work.efi_rts_comp);                    \
-       INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
+       INIT_WORK(&efi_rts_work.work, efi_call_rts);                    \
        efi_rts_work.arg1 = _arg1;                                      \
        efi_rts_work.arg2 = _arg2;                                      \
        efi_rts_work.arg3 = _arg3;                                      \
index 7c88f12096c5b5a94000878428b59b29a9d06211..1fafc2f8e8f9cd13f8d1ef6bc651941d19de594c 100644 (file)
@@ -11,7 +11,7 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
                drm_encoder_slave.o \
-               drm_trace_points.o drm_global.o drm_prime.o \
+               drm_trace_points.o drm_prime.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
                drm_modeset_lock.o drm_atomic.o drm_bridge.o \
                drm_framebuffer.o drm_connector.o drm_blend.o \
index 138cb787d27e832dd773d87e293dce189239f5af..f76bcb9c45e488a07d1f17dc7125d831563c76fc 100644 (file)
@@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
        amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-       amdgpu_gmc.o amdgpu_xgmi.o
+       amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@ amdgpu-y += \
 # add GFX block
 amdgpu-y += \
        amdgpu_gfx.o \
+       amdgpu_rlc.o \
        gfx_v8_0.o \
        gfx_v9_0.o
 
index d0102cfc8efbd1825df74c84ed26a0d0042a7e25..42f882c633eeebff07513f2ba60fb0d995aa6c65 100644 (file)
@@ -75,6 +75,7 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
@@ -151,6 +152,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
 extern struct amdgpu_mgpu_info mgpu_info;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -432,7 +434,7 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
         * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
         */
 
-       /* sDMA engines  reserved from 0xe0 -oxef  */
+       /* sDMA engines reserved from 0xe0 - 0xef */
        AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xE0,
        AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xE1,
        AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xE8,
@@ -830,7 +832,6 @@ struct amdgpu_device {
        bool                            need_dma32;
        bool                            need_swiotlb;
        bool                            accel_working;
-       struct work_struct              reset_work;
        struct notifier_block           acpi_nb;
        struct amdgpu_i2c_chan          *i2c_bus[AMDGPU_MAX_I2C_BUS];
        struct amdgpu_debugfs           debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
index c31a8849e9f87705ed3abac2ac23bfcbc7303d85..60f9a87e9c744c92a62f8f664cc6464b005615ac 100644 (file)
@@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                                  KGD_MAX_QUEUES);
 
                /* remove the KIQ bit as well */
-               if (adev->gfx.kiq.ring.ready)
+               if (adev->gfx.kiq.ring.sched.ready)
                        clear_bit(amdgpu_gfx_queue_to_bit(adev,
                                                          adev->gfx.kiq.ring.me - 1,
                                                          adev->gfx.kiq.ring.pipe,
@@ -268,9 +268,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
                amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-                       void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+                               void **mem_obj, uint64_t *gpu_addr,
+                               void **cpu_ptr, bool mqd_gfx9)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
@@ -340,7 +340,7 @@ allocate_mem_reserve_bo_failed:
        return r;
 }
 
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 {
        struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -351,8 +351,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
        amdgpu_bo_unref(&(bo));
 }
 
-void get_local_mem_info(struct kgd_dev *kgd,
-                       struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+                                     struct kfd_local_mem_info *mem_info)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +383,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
                mem_info->mem_clk_max = 100;
 }
 
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -392,7 +392,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
        return 0;
 }
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -405,7 +405,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
                return 100;
 }
 
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
index 8e0d4f7196b4f483818ad951df7f59e8e0b0f60f..bcf587b4ba9809f40f2efa5e8c6d6c97db207bd5 100644 (file)
@@ -134,16 +134,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
 /* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-                       void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
-                       struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+                               void **mem_obj, uint64_t *gpu_addr,
+                               void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+                                     struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
index 244d9834a3814381e638758a091b9f8287f8b57e..72a357dae07006d9ce46cd72df93d3b5cf201094 100644 (file)
@@ -173,13 +173,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-       .init_gtt_mem_allocation = alloc_gtt_mem,
-       .free_gtt_mem = free_gtt_mem,
-       .get_local_mem_info = get_local_mem_info,
-       .get_gpu_clock_counter = get_gpu_clock_counter,
-       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-       .alloc_pasid = amdgpu_pasid_alloc,
-       .free_pasid = amdgpu_pasid_free,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
@@ -200,28 +193,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = get_tile_config,
-       .get_cu_info = get_cu_info,
-       .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-       .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-       .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-       .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-       .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-       .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
-       .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-       .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-       .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-       .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-       .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-       .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-       .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-       .submit_ib = amdgpu_amdkfd_submit_ib,
-       .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
        .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-       .gpu_recover = amdgpu_amdkfd_gpu_reset,
-       .set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
index 9f149914ad6cd113343cdd6ec406fa240d9fe7c4..0e2a56b6a9b68ba557c149ddefaa41cfc482c783 100644 (file)
@@ -128,13 +128,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-       .init_gtt_mem_allocation = alloc_gtt_mem,
-       .free_gtt_mem = free_gtt_mem,
-       .get_local_mem_info = get_local_mem_info,
-       .get_gpu_clock_counter = get_gpu_clock_counter,
-       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-       .alloc_pasid = amdgpu_pasid_alloc,
-       .free_pasid = amdgpu_pasid_free,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
@@ -157,27 +150,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = get_tile_config,
-       .get_cu_info = get_cu_info,
-       .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-       .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-       .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-       .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-       .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-       .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
-       .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-       .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-       .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-       .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-       .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-       .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-       .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-       .submit_ib = amdgpu_amdkfd_submit_ib,
-       .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
-       .gpu_recover = amdgpu_amdkfd_gpu_reset,
-       .set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
index 42cb4c4e0929150de17d2056dbb055883e642157..03b604c96d94e24618e5ef3b4baa741938c49204 100644 (file)
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
 
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ                                0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX               0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK                                0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX               0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32          0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32          0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32         0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX        0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32         0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX        0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32           0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX  0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32           0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX  0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32            0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX   0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32            0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX   0
 
 #define V9_PIPE_PER_MEC                (4)
 #define V9_QUEUES_PER_PIPE_MEC (8)
@@ -167,13 +138,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-       .init_gtt_mem_allocation = alloc_gtt_mem,
-       .free_gtt_mem = free_gtt_mem,
-       .get_local_mem_info = get_local_mem_info,
-       .get_gpu_clock_counter = get_gpu_clock_counter,
-       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-       .alloc_pasid = amdgpu_pasid_alloc,
-       .free_pasid = amdgpu_pasid_free,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
@@ -196,26 +160,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = amdgpu_amdkfd_get_tile_config,
-       .get_cu_info = get_cu_info,
-       .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-       .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-       .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-       .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-       .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-       .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
-       .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-       .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-       .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-       .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-       .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-       .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-       .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-       .submit_ib = amdgpu_amdkfd_submit_ib,
-       .gpu_recover = amdgpu_amdkfd_gpu_reset,
-       .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
        .get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
 
@@ -785,15 +732,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-       uint32_t req = (1 << vmid) |
-               (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
-               VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
-               VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
-               VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
-               VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
-               VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
-       mutex_lock(&adev->srbm_mutex);
 
        /* Use legacy mode tlb invalidation.
         *
@@ -810,34 +748,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
         * TODO 2: support range-based invalidation, requires kfd2kgd
         * interface change
         */
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-                               0xffffffff);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-                               0x0000001f);
-
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                               mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-                               0xffffffff);
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                               mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-                               0x0000001f);
-
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
-                               req);
-
-       while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
-                                       (1 << vmid)))
-               cpu_relax();
-
-       while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                                       mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
-                                       (1 << vmid)))
-               cpu_relax();
-
-       mutex_unlock(&adev->srbm_mutex);
-
+       amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +787,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
        if (adev->in_gpu_reset)
                return -EIO;
 
-       if (ring->ready)
+       if (ring->sched.ready)
                return invalidate_tlbs_with_kiq(adev, pasid);
 
        for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +927,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint64_t page_table_base)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       uint64_t base = page_table_base | AMDGPU_PTE_VALID;
 
        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +938,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
         * now, all processes share the same address space size, like
         * on GFX8 and older.
         */
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-                       lower_32_bits(adev->vm_manager.max_pfn - 1));
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-                       upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-       WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-                       lower_32_bits(adev->vm_manager.max_pfn - 1));
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-                       upper_32_bits(adev->vm_manager.max_pfn - 1));
+       mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+       gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
index 8816c697b2053c7c28f119f1362443d7b9ad6e98..ceadeeadfa56aa618cb26f3cfdcdd13df863baff 100644 (file)
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                        case CHIP_TOPAZ:
                                if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
                                    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
                                        info->is_kicker = true;
                                        strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
                                } else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                if (type == CGS_UCODE_ID_SMU) {
                                        if (((adev->pdev->device == 0x67ef) &&
                                             ((adev->pdev->revision == 0xe0) ||
-                                             (adev->pdev->revision == 0xe2) ||
                                              (adev->pdev->revision == 0xe5))) ||
                                            ((adev->pdev->device == 0x67ff) &&
                                             ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                              (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67ef) &&
+                                                  (adev->pdev->revision == 0xe2)) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
                                }
@@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                             (adev->pdev->revision == 0xef))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67df) &&
+                                                  ((adev->pdev->revision == 0xe1) ||
+                                                   (adev->pdev->revision == 0xf7))) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                }
                                break;
                        case CHIP_POLARIS12:
-                               strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               if (((adev->pdev->device == 0x6987) &&
+                                    ((adev->pdev->revision == 0xc0) ||
+                                     (adev->pdev->revision == 0xc3))) ||
+                                   ((adev->pdev->device == 0x6981) &&
+                                    ((adev->pdev->revision == 0x00) ||
+                                     (adev->pdev->revision == 0x01) ||
+                                     (adev->pdev->revision == 0x10)))) {
+                                       info->is_kicker = true;
+                                       strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+                               } else {
+                                       strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               }
                                break;
                        case CHIP_VEGAM:
                                strcpy(fw_name, "amdgpu/vegam_smc.bin");
index 35bc8fc3bc701d965291bda7435835175982c5e3..024dfbd87f118beaa4384711fc986a579668a86a 100644 (file)
@@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        return 0;
 
 error_abort:
-       dma_fence_put(&job->base.s_fence->finished);
-       job->base.s_fence = NULL;
+       drm_sched_job_cleanup(&job->base);
        amdgpu_mn_unlock(p->mn);
 
 error_unlock:
@@ -1285,7 +1284,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
        r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
-               DRM_ERROR("Failed to initialize parser !\n");
+               DRM_ERROR("Failed to initialize parser %d!\n", r);
                goto out;
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644 (file)
index 0000000..0c590dd
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+
+ *
+ * Author: Monk.liu@amd.com
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+       uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+       addr -= AMDGPU_VA_RESERVED_SIZE;
+       addr = amdgpu_gmc_sign_extend(addr);
+
+       return addr;
+}
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+                               u32 domain, uint32_t size)
+{
+       int r;
+       void *ptr;
+
+       r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+                               domain, bo,
+                               NULL, &ptr);
+       if (r)
+               return r;
+
+       memset(ptr, 0, size);
+       return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+       amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init.
+ * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each
+ * GFX command submission should use this virtual address within the
+ * META_DATA init package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+                         uint64_t csa_addr, uint32_t size)
+{
+       struct ww_acquire_ctx ticket;
+       struct list_head list;
+       struct amdgpu_bo_list_entry pd;
+       struct ttm_validate_buffer csa_tv;
+       int r;
+
+       INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&csa_tv.head);
+       csa_tv.bo = &bo->tbo;
+       csa_tv.shared = true;
+
+       list_add(&csa_tv.head, &list);
+       amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       if (r) {
+               DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
+               return r;
+       }
+
+       *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+       if (!*bo_va) {
+               ttm_eu_backoff_reservation(&ticket, &list);
+               DRM_ERROR("failed to create bo_va for static CSA\n");
+               return -ENOMEM;
+       }
+
+       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+                               size);
+       if (r) {
+               DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               return r;
+       }
+
+       r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+                            AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+                            AMDGPU_PTE_EXECUTABLE);
+
+       if (r) {
+               DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               return r;
+       }
+
+       ttm_eu_backoff_reservation(&ticket, &list);
+       return 0;
+}
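
Per the comment above, the mapping is meant to be established while the VM is
being set up. A hedged usage sketch of the new entry point (the fpriv fields
and the error label are assumptions about the caller, not code from this
patch):

    /* In the per-file open path, after amdgpu_vm_init() has run: */
    if (amdgpu_sriov_vf(adev)) {
            r = amdgpu_map_static_csa(adev, &fpriv->vm,
                                      adev->virt.csa_obj,
                                      &fpriv->csa_va,
                                      amdgpu_csa_vaddr(adev),
                                      AMDGPU_CSA_SIZE);
            if (r)
                    goto error_vm;
    }
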
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644 (file)
index 0000000..524b443
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE                (128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+                               u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+                         uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
index 30bc345d6fdf0d5827c2aa737d284da92787ea70..590588a82471fb1cea8049e46e196e912bb146cf 100644 (file)
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
                        /* right after GMC hw init, we create CSA */
                        if (amdgpu_sriov_vf(adev)) {
-                               r = amdgpu_allocate_static_csa(adev);
+                               r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+                                                               AMDGPU_GEM_DOMAIN_VRAM,
+                                                               AMDGPU_CSA_SIZE);
                                if (r) {
                                        DRM_ERROR("allocate CSA failed %d\n", r);
                                        return r;
@@ -1681,7 +1683,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       amdgpu_xgmi_add_device(adev);
+       if (adev->gmc.xgmi.num_physical_nodes > 1)
+               amdgpu_xgmi_add_device(adev);
        amdgpu_amdkfd_device_init(adev);
 
        if (amdgpu_sriov_vf(adev))
@@ -1890,7 +1893,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
                        amdgpu_ucode_free_bo(adev);
-                       amdgpu_free_static_csa(adev);
+                       amdgpu_free_static_csa(&adev->virt.csa_obj);
                        amdgpu_device_wb_fini(adev);
                        amdgpu_device_vram_scratch_fini(adev);
                }
@@ -3295,13 +3298,35 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                return false;
        }
 
-       if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1  &&
-                                        !amdgpu_sriov_vf(adev))) {
-               DRM_INFO("GPU recovery disabled.\n");
-               return false;
+       if (amdgpu_gpu_recovery == 0)
+               goto disabled;
+
+       if (amdgpu_sriov_vf(adev))
+               return true;
+
+       if (amdgpu_gpu_recovery == -1) {
+               switch (adev->asic_type) {
+               case CHIP_TOPAZ:
+               case CHIP_TONGA:
+               case CHIP_FIJI:
+               case CHIP_POLARIS10:
+               case CHIP_POLARIS11:
+               case CHIP_POLARIS12:
+               case CHIP_VEGAM:
+               case CHIP_VEGA20:
+               case CHIP_VEGA10:
+               case CHIP_VEGA12:
+                       break;
+               default:
+                       goto disabled;
+               }
        }
 
        return true;
+
+disabled:
+       DRM_INFO("GPU recovery disabled.\n");
+       return false;
 }
 
 /**
index 943dbf3c5da12ddb03439e3cc1ea002771e5ab14..8de55f7f1a3a3922b4a1ac2d17cf12cdd35d1fd6 100644 (file)
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default */
+uint amdgpu_dc_feature_mask = 0;
+
 struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
 MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 #endif
 
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override the enabled display features. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
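
A toy sketch of consuming such a mask (the DC_FEATURE_FBC value is an
assumption based on the "bit 0" comment earlier; the real enum lives in
amd_shared.h, which this excerpt does not show). On a real system the value
would come from the module parameter, for example booting with
amdgpu.dcfeaturemask=0x1:

    #include <stdbool.h>
    #include <stdio.h>

    #define DC_FEATURE_FBC 0x1  /* assumed: bit 0 per the comment above */

    static bool dc_feature_enabled(unsigned int mask, unsigned int feature)
    {
            return (mask & feature) != 0;
    }

    int main(void)
    {
            unsigned int dcfeaturemask = 0x1;

            printf("FBC %s\n",
                   dc_feature_enabled(dcfeaturemask, DC_FEATURE_FBC)
                           ? "enabled" : "disabled");
            return 0;
    }
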
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
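(The new mask is consumed by the DC core at initialization; a hedged sketch of testing a feature bit, assuming bit 0 is FBC as the comment above states — the mask macro and helper names are illustrative, not the driver's own. Booting with amdgpu.dcfeaturemask=0x1 on the kernel command line would then opt in to that bit:)

#define EXAMPLE_DC_FBC_MASK 0x1 /* illustrative; bit 0 = FBC per the comment above */

static bool example_dc_fbc_requested(void)
{
        return (amdgpu_dc_feature_mask & EXAMPLE_DC_FBC_MASK) != 0;
}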
index 5448cf27654ee61c09632c8f88626b54e4a5fb26..ee47c11e92ce7f021e7ce87d5613c7a9543eb900 100644 (file)
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;
 
-       dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
-               "cpu addr 0x%p\n", ring->idx,
-               ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+       DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+                     "0x%016llx, cpu addr 0x%p\n", ring->name,
+                     ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
 }
 
index 11fea28f8ad30da94a18a28763a498bb5f84868e..6d11e1721147e53f13c096cae6e2f5662b580cc8 100644 (file)
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        }
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
-       amdgpu_gmc_flush_gpu_tlb(adev, 0);
+       amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
        return 0;
 }
 
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
  *
  * Map the dma_addresses into GART entries (all asics).
  * Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
-       amdgpu_gmc_flush_gpu_tlb(adev, 0);
+       amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
        return 0;
 }
 
index 9ff62887e4e32cd0378295720e7517d74b035efd..afa2e2877d87cf7065d7000a4a5ed451ca673e43 100644 (file)
@@ -41,6 +41,7 @@ struct amdgpu_bo;
 
 struct amdgpu_gart {
        struct amdgpu_bo                *bo;
+       /* CPU kmapped address of gart table */
        void                            *ptr;
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
index 1a656b8657f736fa0385aba0c54c6548d72af819..6a70c0b7105fb4372cf3ddb5fda4c04e7082e9ee 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
 
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
index b61b5c11aeadd3893c5ed8659aec88f1861bbb8c..f790e15bcd087901d784b7963ec20487ae4814fb 100644 (file)
@@ -29,6 +29,7 @@
  */
 #include "clearstate_defs.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE                 0x00000000L
 #define AMDGPU_GFX_CG_DISABLED_MODE            0x00000004L
 #define AMDGPU_GFX_LBPW_DISABLED_MODE          0x00000008L
 
-
-struct amdgpu_rlc_funcs {
-       void (*enter_safe_mode)(struct amdgpu_device *adev);
-       void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
-       /* for power gating */
-       struct amdgpu_bo        *save_restore_obj;
-       uint64_t                save_restore_gpu_addr;
-       volatile uint32_t       *sr_ptr;
-       const u32               *reg_list;
-       u32                     reg_list_size;
-       /* for clear state */
-       struct amdgpu_bo        *clear_state_obj;
-       uint64_t                clear_state_gpu_addr;
-       volatile uint32_t       *cs_ptr;
-       const struct cs_section_def   *cs_data;
-       u32                     clear_state_size;
-       /* for cp tables */
-       struct amdgpu_bo        *cp_table_obj;
-       uint64_t                cp_table_gpu_addr;
-       volatile uint32_t       *cp_table_ptr;
-       u32                     cp_table_size;
-
-       /* safe mode for updating CG/PG state */
-       bool in_safe_mode;
-       const struct amdgpu_rlc_funcs *funcs;
-
-       /* for firmware data */
-       u32 save_and_restore_offset;
-       u32 clear_state_descriptor_offset;
-       u32 avail_scratch_ram_locations;
-       u32 reg_restore_list_size;
-       u32 reg_list_format_start;
-       u32 reg_list_format_separate_start;
-       u32 starting_offsets_start;
-       u32 reg_list_format_size_bytes;
-       u32 reg_list_size_bytes;
-       u32 reg_list_format_direct_reg_list_length;
-       u32 save_restore_list_cntl_size_bytes;
-       u32 save_restore_list_gpm_size_bytes;
-       u32 save_restore_list_srm_size_bytes;
-
-       u32 *register_list_format;
-       u32 *register_restore;
-       u8 *save_restore_list_cntl;
-       u8 *save_restore_list_gpm;
-       u8 *save_restore_list_srm;
-
-       bool is_rlc_v2_1;
-};
-
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
 struct amdgpu_mec {
index 6fa7ef446e4631136df9c4b17fe6d510de8561eb..8c57924c075fa5e8a7dc69d6b6c06384f9f6e96d 100644 (file)
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
 struct amdgpu_gmc_funcs {
        /* flush the vm tlb via mmio */
        void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-                             uint32_t vmid);
+                             uint32_t vmid, uint32_t flush_type);
        /* flush the vm tlb via ring */
        uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
                                       uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
 
 struct amdgpu_xgmi {
        /* from psp */
-       u64 device_id;
+       u64 node_id;
        u64 hive_id;
        /* fixed per family */
        u64 node_segment_size;
@@ -151,7 +151,7 @@ struct amdgpu_gmc {
        struct amdgpu_xgmi xgmi;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
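(Callers of the TLB flush now pass an explicit flush type; a minimal sketch matching the converted GART paths above, which pass type 0 — the wrapper name is hypothetical:)

static void example_flush_gart_tlb(struct amdgpu_device *adev)
{
        /* VMID 0 covers the GART; flush type 0 matches the updated callers */
        amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
}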
index b8963b725dfa05baf9353f0859bde8af7c4e14f7..c48207b377bc5f5c64549eca69662896285971e5 100644 (file)
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                fence_ctx = 0;
        }
 
-       if (!ring->ready) {
+       if (!ring->sched.ready) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                        !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
-               amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-                                   need_ctx_switch);
+               amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
                need_ctx_switch = false;
        }
 
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
        }
 
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+       for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                long tmo;
 
-               if (!ring || !ring->ready)
-                       continue;
-
-               /* skip IB tests for KIQ in general for the below reasons:
-                * 1. We never submit IBs to the KIQ
-                * 2. KIQ doesn't use the EOP interrupts,
-                *    we use some other CP interrupt.
+               /* KIQ rings don't have an IB test because we never submit IBs
+                * to them and they have no interrupt support.
                 */
-               if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+               if (!ring->sched.ready || !ring->funcs->test_ib)
                        continue;
 
                /* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                        tmo = tmo_gfx;
 
                r = amdgpu_ring_test_ib(ring, tmo);
-               if (r) {
-                       ring->ready = false;
-
-                       if (ring == &adev->gfx.gfx_ring[0]) {
-                               /* oh, oh, that's really bad */
-                               DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
-                               adev->accel_working = false;
-                               return r;
-
-                       } else {
-                               /* still not good, but we can live with it */
-                               DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
-                               ret = r;
-                       }
+               if (!r) {
+                       DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+                                     ring->name);
+                       continue;
+               }
+
+               ring->sched.ready = false;
+               DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+                         ring->name, r);
+
+               if (ring == &adev->gfx.gfx_ring[0]) {
+                       /* oh, oh, that's really bad */
+                       adev->accel_working = false;
+                       return r;
+
+               } else {
+                       ret = r;
                }
        }
        return ret;
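(With readiness folded into the embedded GPU scheduler, submission paths now test ring->sched.ready; a minimal sketch of the guard used above — the helper name is hypothetical:)

static int example_ring_usable(struct amdgpu_ring *ring)
{
        /* the per-ring 'ready' bool is gone; the scheduler owns the state */
        return ring->sched.ready ? 0 : -EINVAL;
}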
index 52c17f6219a706d2793d999eef39979342c5ab28..6b6524f04ce09246b906a9da07a4f8d8b767a417 100644 (file)
@@ -93,23 +93,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
        drm_helper_hpd_irq_event(dev);
 }
 
-/**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
-       struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
-                                                 reset_work);
-
-       if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
-               amdgpu_device_gpu_recover(adev, NULL);
-}
-
 /**
  * amdgpu_irq_disable_all - disable *all* interrupts
  *
@@ -262,15 +245,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
                                amdgpu_hotplug_work_func);
        }
 
-       INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
-
        adev->irq.installed = true;
        r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
        if (r) {
                adev->irq.installed = false;
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
-               cancel_work_sync(&adev->reset_work);
                return r;
        }
        adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +279,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                        pci_disable_msi(adev->pdev);
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
-               cancel_work_sync(&adev->reset_work);
        }
 
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
index 755f733bf0d9517591f945e931a116cb0ae2c4ac..e0af44fd6a0cf7b52c266400510a857b74ea5efa 100644 (file)
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
 
+       drm_sched_job_cleanup(s_job);
+
        amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
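(drm_sched_job_cleanup() has to run before the driver tears down its own job state; a minimal sketch of the ordering, with the teardown details elided and the callback name hypothetical:)

static void example_free_job_cb(struct drm_sched_job *s_job)
{
        /* detach from the scheduler first ... */
        drm_sched_job_cleanup(s_job);
        /* ... then release driver-side fences, sync objects, etc. */
}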
index 57cfe78a262b11e70eb037afda5755769932fabd..e1b46a6703ded9a60370bedf20bab108b19fd808 100644 (file)
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)               \
                container_of((sched_job), struct amdgpu_job, base)
 
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
 struct amdgpu_fence;
 
 struct amdgpu_job {
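(The macro guards the NULL-job case — direct submissions such as IB tests carry no job; a sketch of an emit_ib implementation using it, matching the new callback signature introduced in amdgpu_ring.h below. The function name is hypothetical:)

static void example_ring_emit_ib(struct amdgpu_ring *ring,
                                 struct amdgpu_job *job,
                                 struct amdgpu_ib *ib,
                                 bool ctx_switch)
{
        unsigned int vmid = AMDGPU_JOB_GET_VMID(job); /* 0 when job is NULL */

        /* ... emit the IB packet using vmid and ib->gpu_addr ... */
}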
index 81732a84c2ab090af4e2f834e2223c0eeabe2bcc..9b3164c0f861878273cf0ff8a0d0211a6911b5ed 100644 (file)
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_GFX:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       if (adev->gfx.gfx_ring[i].ready)
+                       if (adev->gfx.gfx_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_COMPUTE:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       if (adev->gfx.compute_ring[i].ready)
+                       if (adev->gfx.compute_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_DMA:
                type = AMD_IP_BLOCK_TYPE_SDMA;
                for (i = 0; i < adev->sdma.num_instances; i++)
-                       if (adev->sdma.instance[i].ring.ready)
+                       if (adev->sdma.instance[i].ring.sched.ready)
                                ++num_rings;
                ib_start_alignment = 256;
                ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;
 
-                       if (adev->uvd.inst[i].ring.ready)
+                       if (adev->uvd.inst[i].ring.sched.ready)
                                ++num_rings;
                }
                ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCE:
                type = AMD_IP_BLOCK_TYPE_VCE;
                for (i = 0; i < adev->vce.num_rings; i++)
-                       if (adev->vce.ring[i].ready)
+                       if (adev->vce.ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 4;
                ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                                continue;
 
                        for (j = 0; j < adev->uvd.num_enc_rings; j++)
-                               if (adev->uvd.inst[i].ring_enc[j].ready)
+                               if (adev->uvd.inst[i].ring_enc[j].sched.ready)
                                        ++num_rings;
                }
                ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_dec.ready)
+               if (adev->vcn.ring_dec.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                       if (adev->vcn.ring_enc[i].ready)
+                       if (adev->vcn.ring_enc[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 64;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_jpeg.ready)
+               if (adev->vcn.ring_jpeg.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        }
 
        if (amdgpu_sriov_vf(adev)) {
-               r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+               uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+               r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+                                               &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
                if (r)
                        goto error_vm;
        }
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        pasid = fpriv->vm.pasid;
        pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
-       amdgpu_vm_fini(adev, &fpriv->vm);
        amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+       amdgpu_vm_fini(adev, &fpriv->vm);
 
        if (pasid)
                amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
index b9e9e8b02fb756a0d7291c605353d59cd228826e..11723d8fffbd67fd77591ec03685a732271daa56 100644 (file)
@@ -57,7 +57,6 @@ struct amdgpu_hpd;
 #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
 #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
 #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x)     container_of(x, struct amdgpu_plane, base)
 
 #define to_dm_plane_state(x)   container_of(x, struct dm_plane_state, base);
 
@@ -325,7 +324,7 @@ struct amdgpu_mode_info {
        struct card_info *atom_card_info;
        bool mode_config_initialized;
        struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
-       struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+       struct drm_plane *planes[AMDGPU_MAX_PLANES];
        struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
@@ -434,11 +433,6 @@ struct amdgpu_crtc {
        struct drm_pending_vblank_event *event;
 };
 
-struct amdgpu_plane {
-       struct drm_plane base;
-       enum drm_plane_type plane_type;
-};
-
 struct amdgpu_encoder_atom_dig {
        bool linkb;
        /* atom dig */
index 59cc678de8c1570642afc2d488f63fbc179a1e99..7235cd0b0fa904be268c6b076b502aa0a0f85145 100644 (file)
@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
-               if (ring && ring->ready)
+               if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }
 
index 25d2f3e757f1bdac06054172f3ad9db6ef5ea8fa..e05dc66b10909b1a3bd5562ce5285c789da2d496 100644 (file)
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle)
        adev->psp.sos_fw = NULL;
        release_firmware(adev->psp.asd_fw);
        adev->psp.asd_fw = NULL;
+       release_firmware(adev->psp.ta_fw);
+       adev->psp.ta_fw = NULL;
        return 0;
 }
 
@@ -118,21 +120,25 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 static int
 psp_cmd_submit_buf(struct psp_context *psp,
                   struct amdgpu_firmware_info *ucode,
-                  struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
-                  int index)
+                  struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
        int ret;
+       int index;
 
        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
        memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
+       index = atomic_inc_return(&psp->fence_value);
        ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
                             fence_mc_addr, index);
+       if (ret) {
+               atomic_dec(&psp->fence_value);
+               return ret;
+       }
 
-       while (*((unsigned int *)psp->fence_buf) != index) {
+       while (*((unsigned int *)psp->fence_buf) != index)
                msleep(1);
-       }
 
        /* the status field must be 0 after FW is loaded */
        if (ucode && psp->cmd_buf_mem->resp.status) {
@@ -191,7 +197,7 @@ static int psp_tmr_load(struct psp_context *psp)
                        PSP_TMR_SIZE, psp->tmr_mc_addr);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
-                                psp->fence_buf_mc_addr, 1);
+                                psp->fence_buf_mc_addr);
        if (ret)
                goto failed;
 
@@ -258,13 +264,194 @@ static int psp_asd_load(struct psp_context *psp)
                             psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
-                                psp->fence_buf_mc_addr, 2);
+                                psp->fence_buf_mc_addr);
+
+       kfree(cmd);
+
+       return ret;
+}
+
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+                                         uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+                                         uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+       cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+       cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+       cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+       cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+       cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+       cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+       cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+       int ret;
+
+       /*
+        * Allocate 16k memory aligned to 4k from Frame Buffer (local
+        * physical) for xgmi ta <-> Driver
+        */
+       ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                                     &psp->xgmi_context.xgmi_shared_bo,
+                                     &psp->xgmi_context.xgmi_shared_mc_addr,
+                                     &psp->xgmi_context.xgmi_shared_buf);
+
+       return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+       int ret;
+       struct psp_gfx_cmd_resp *cmd;
+
+       /*
+        * TODO: bypass the loading in sriov for now
+        */
+       if (amdgpu_sriov_vf(psp->adev))
+               return 0;
+
+       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+       memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+       psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+                                     psp->xgmi_context.xgmi_shared_mc_addr,
+                                     psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+       ret = psp_cmd_submit_buf(psp, NULL, cmd,
+                                psp->fence_buf_mc_addr);
+
+       if (!ret) {
+               psp->xgmi_context.initialized = 1;
+               psp->xgmi_context.session_id = cmd->resp.session_id;
+       }
+
+       kfree(cmd);
+
+       return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+                                           uint32_t xgmi_session_id)
+{
+       cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+       cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+       int ret;
+       struct psp_gfx_cmd_resp *cmd;
+
+       /*
+        * TODO: bypass the unloading in sriov for now
+        */
+       if (amdgpu_sriov_vf(psp->adev))
+               return 0;
+
+       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+       ret = psp_cmd_submit_buf(psp, NULL, cmd,
+                                psp->fence_buf_mc_addr);
 
        kfree(cmd);
 
        return ret;
 }
 
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+                                           uint32_t ta_cmd_id,
+                                           uint32_t xgmi_session_id)
+{
+       cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+       cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+       cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+       /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+       int ret;
+       struct psp_gfx_cmd_resp *cmd;
+
+       /*
+        * TODO: bypass the invocation in sriov for now
+        */
+       if (amdgpu_sriov_vf(psp->adev))
+               return 0;
+
+       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+                                       psp->xgmi_context.session_id);
+
+       ret = psp_cmd_submit_buf(psp, NULL, cmd,
+                                psp->fence_buf_mc_addr);
+
+       kfree(cmd);
+
+       return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+       int ret;
+
+       if (!psp->xgmi_context.initialized)
+               return 0;
+
+       ret = psp_xgmi_unload(psp);
+       if (ret)
+               return ret;
+
+       psp->xgmi_context.initialized = 0;
+
+       /* free xgmi shared memory */
+       amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+                       &psp->xgmi_context.xgmi_shared_mc_addr,
+                       &psp->xgmi_context.xgmi_shared_buf);
+
+       return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       if (!psp->xgmi_context.initialized) {
+               ret = psp_xgmi_init_shared_buf(psp);
+               if (ret)
+                       return ret;
+       }
+
+       /* Load XGMI TA */
+       ret = psp_xgmi_load(psp);
+       if (ret)
+               return ret;
+
+       /* Initialize XGMI session */
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+       return ret;
+}
+
 static int psp_hw_start(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
@@ -292,6 +479,15 @@ static int psp_hw_start(struct psp_context *psp)
        if (ret)
                return ret;
 
+       if (adev->gmc.xgmi.num_physical_nodes > 1) {
+               ret = psp_xgmi_initialize(psp);
+               /* Warn on XGMI session initialization failure
+                * instead of stopping driver initialization.
+                */
+               if (ret)
+                       dev_err(psp->adev->dev,
+                               "XGMI: Failed to initialize XGMI session\n");
+       }
        return 0;
 }
 
@@ -321,7 +517,7 @@ static int psp_np_fw_load(struct psp_context *psp)
                        return ret;
 
                ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
-                                        psp->fence_buf_mc_addr, i + 3);
+                                        psp->fence_buf_mc_addr);
                if (ret)
                        return ret;
 
@@ -452,6 +648,10 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+           psp->xgmi_context.initialized == 1)
+               psp_xgmi_terminate(psp);
+
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -479,6 +679,15 @@ static int psp_suspend(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+           psp->xgmi_context.initialized == 1) {
+               ret = psp_xgmi_terminate(psp);
+               if (ret) {
+                       DRM_ERROR("Failed to terminate xgmi ta\n");
+                       return ret;
+               }
+       }
+
        ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
        if (ret) {
                DRM_ERROR("PSP ring stop failed\n");
index 8b8720e9c3f0211b8e1aee4377aa787fb0e4dea0..9ec5d1a666a6dbe082aa7ba46394fa9a5704f28e 100644 (file)
 
 #include "amdgpu.h"
 #include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
 
 #define PSP_FENCE_BUFFER_SIZE  0x1000
 #define PSP_CMD_BUFFER_SIZE    0x1000
-#define PSP_ASD_SHARED_MEM_SIZE        0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
 #define PSP_1_MEG              0x100000
 #define PSP_TMR_SIZE   0x400000
 
 struct psp_context;
+struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
 
 enum psp_ring_type
@@ -80,12 +83,20 @@ struct psp_funcs
                                  enum AMDGPU_UCODE_ID ucode_type);
        bool (*smu_reload_quirk)(struct psp_context *psp);
        int (*mode1_reset)(struct psp_context *psp);
-       uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
+       uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
        uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
        int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
-                       struct psp_xgmi_topology_info *topology);
+                                     struct psp_xgmi_topology_info *topology);
        int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
-                       struct psp_xgmi_topology_info *topology);
+                                     struct psp_xgmi_topology_info *topology);
+};
+
+struct psp_xgmi_context {
+       uint8_t                         initialized;
+       uint32_t                        session_id;
+       struct amdgpu_bo                *xgmi_shared_bo;
+       uint64_t                        xgmi_shared_mc_addr;
+       void                            *xgmi_shared_buf;
 };
 
 struct psp_context
@@ -96,7 +107,7 @@ struct psp_context
 
        const struct psp_funcs          *funcs;
 
-       /* fence buffer */
+       /* firmware buffer */
        struct amdgpu_bo                *fw_pri_bo;
        uint64_t                        fw_pri_mc_addr;
        void                            *fw_pri_buf;
@@ -134,6 +145,16 @@ struct psp_context
        struct amdgpu_bo                *cmd_buf_bo;
        uint64_t                        cmd_buf_mc_addr;
        struct psp_gfx_cmd_resp         *cmd_buf_mem;
+
+       /* fence value associated with cmd buffer */
+       atomic_t                        fence_value;
+
+       /* xgmi ta firmware and buffer */
+       const struct firmware           *ta_fw;
+       uint32_t                        ta_xgmi_ucode_version;
+       uint32_t                        ta_xgmi_ucode_size;
+       uint8_t                         *ta_xgmi_start_addr;
+       struct psp_xgmi_context         xgmi_context;
 };
 
 struct amdgpu_psp_funcs {
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs {
                                        enum AMDGPU_UCODE_ID);
 };
 
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES                64
+struct psp_xgmi_node_info {
+       uint64_t                                node_id;
+       uint8_t                                 num_hops;
+       uint8_t                                 is_sharing_enabled;
+       enum ta_xgmi_assigned_sdma_engine       sdma_engine;
+};
+
 struct psp_xgmi_topology_info {
-       /* Generated by PSP to identify the GPU instance within xgmi connection */
-       uint64_t                        device_id;
-       /*
-        * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
-        * connection vector topology, but not access enable the connections
-        * if some or all bits are set to 1, driver indicates it want to retrieve the
-        * current xgmi topology and  access enable the link to GPU[i] associated
-        * with the bit position in the  vector.
-        * On return,: bits indicated which xgmi links are present/active depending
-        * on the  value passed in. The relative bit offset for the  relative GPU index
-        * within the  hive is always marked active.
-        */
-       uint32_t                        connection_mask;
-       uint32_t                        reserved; /* must be  0 */
+       uint32_t                        num_nodes;
+       struct psp_xgmi_node_info       nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };
 
 #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info {
                ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
 #define psp_mode1_reset(psp) \
                ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
-               ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp) \
+               ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
 #define psp_xgmi_get_hive_id(psp) \
                ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
 #define psp_xgmi_get_topology_info(psp, num_device, topology) \
@@ -199,6 +216,8 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 
 #endif
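(The topology structure switched from a connection mask to an explicit node table; a sketch of consuming it — the dump helper is hypothetical, and population of 'topology' happens on the driver side:)

static void example_dump_topology(struct psp_xgmi_topology_info *topology)
{
        uint32_t i;

        for (i = 0; i < topology->num_nodes; i++)
                pr_info("XGMI node 0x%llx: %u hop(s)\n",
                        topology->nodes[i].node_id,
                        topology->nodes[i].num_hops);
}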
index b70e85ec147d54d4784be1b9b66639be9ff26d8a..5b75bdc8dc28f673957067f36268ea7fb6eae8e1 100644 (file)
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-       ring->ready = false;
+       ring->sched.ready = false;
 
        /* Not to finish a ring which is not initialized */
        if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
        debugfs_remove(ring->ent);
 #endif
 }
+
+/**
+ * amdgpu_ring_test_helper - test a ring and set scheduler readiness status
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets the scheduler's ready status accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int r;
+
+       r = amdgpu_ring_test_ring(ring);
+       if (r)
+               DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+                             ring->name, r);
+       else
+               DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+                             ring->name);
+
+       ring->sched.ready = !r;
+       return r;
+}
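(Individual IP blocks can now replace open-coded test-and-set sequences with this helper; a minimal sketch for a hw_init path — the function name and ring choice are illustrative:)

static int example_hw_init_test(struct amdgpu_device *adev)
{
        /* runs the ring test, logs the result and sets sched.ready */
        return amdgpu_ring_test_helper(&adev->gfx.gfx_ring[0]);
}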
index 4caa301ce454884b9ebe980b3b554c19e2932be0..0beb01fef83fd38c9b940c167fa7df7d25df1f71 100644 (file)
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
        unsigned emit_ib_size;
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
+                       struct amdgpu_job *job,
                        struct amdgpu_ib *ib,
-                       unsigned vmid, bool ctx_switch);
+                       bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
        uint64_t                gpu_addr;
        uint64_t                ptr_mask;
        uint32_t                buf_mask;
-       bool                    ready;
        u32                     idx;
        u32                     me;
        u32                     pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
        ring->count_dw -= count_dw;
 }
 
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644 (file)
index 0000000..c8793e6
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if it is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+       if (adev->gfx.rlc.in_safe_mode)
+               return;
+
+       /* if RLC is not enabled, do nothing */
+       if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+               return;
+
+       if (adev->cg_flags &
+           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+               adev->gfx.rlc.funcs->set_safe_mode(adev);
+               adev->gfx.rlc.in_safe_mode = true;
+       }
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+       if (!adev->gfx.rlc.in_safe_mode)
+               return;
+
+       /* if RLC is not enabled, do nothing */
+       if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+               return;
+
+       if (adev->cg_flags &
+           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+               adev->gfx.rlc.funcs->unset_safe_mode(adev);
+               adev->gfx.rlc.in_safe_mode = false;
+       }
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: size of the save/restore block, in dwords
+ *
+ * Allocate and initialize the RLC save/restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+       const u32 *src_ptr;
+       volatile u32 *dst_ptr;
+       u32 i;
+       int r;
+
+       /* allocate save restore block */
+       r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.save_restore_obj,
+                                     &adev->gfx.rlc.save_restore_gpu_addr,
+                                     (void **)&adev->gfx.rlc.sr_ptr);
+       if (r) {
+               dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* write the sr buffer */
+       src_ptr = adev->gfx.rlc.reg_list;
+       dst_ptr = adev->gfx.rlc.sr_ptr;
+       for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+               dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+       amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+       return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+       volatile u32 *dst_ptr;
+       u32 dws;
+       int r;
+
+       /* allocate clear state block */
+       adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+       r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.clear_state_obj,
+                                     &adev->gfx.rlc.clear_state_gpu_addr,
+                                     (void **)&adev->gfx.rlc.cs_ptr);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* set up the cs buffer */
+       dst_ptr = adev->gfx.rlc.cs_ptr;
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+       amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+       amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+       return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.cp_table_obj,
+                                     &adev->gfx.rlc.cp_table_gpu_addr,
+                                     (void **)&adev->gfx.rlc.cp_table_ptr);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* set up the cp table */
+       amdgpu_gfx_rlc_setup_cp_table(adev);
+       amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+       return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write the CP firmware data into the CP table.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+       /* write the cp table buffer */
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 4) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save/restore block, clear state block
+ * and jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+       /* save restore block */
+       if (adev->gfx.rlc.save_restore_obj) {
+               amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+                                     &adev->gfx.rlc.save_restore_gpu_addr,
+                                     (void **)&adev->gfx.rlc.sr_ptr);
+       }
+
+       /* clear state block */
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+                             &adev->gfx.rlc.clear_state_gpu_addr,
+                             (void **)&adev->gfx.rlc.cs_ptr);
+
+       /* jump table block */
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+                             &adev->gfx.rlc.cp_table_gpu_addr,
+                             (void **)&adev->gfx.rlc.cp_table_ptr);
+}
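(The factored-out safe-mode helpers keep the usual bracketing pattern around clock- and powergating updates; a minimal sketch, with the register programming elided and the function name hypothetical:)

static void example_update_cg_state(struct amdgpu_device *adev)
{
        amdgpu_gfx_rlc_enter_safe_mode(adev);
        /* ... program clockgating/powergating registers ... */
        amdgpu_gfx_rlc_exit_safe_mode(adev);
}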
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644 (file)
index 0000000..49a8ab5
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+       bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+       void (*set_safe_mode)(struct amdgpu_device *adev);
+       void (*unset_safe_mode)(struct amdgpu_device *adev);
+       int  (*init)(struct amdgpu_device *adev);
+       u32  (*get_csb_size)(struct amdgpu_device *adev);
+       void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+       int  (*get_cp_table_num)(struct amdgpu_device *adev);
+       int  (*resume)(struct amdgpu_device *adev);
+       void (*stop)(struct amdgpu_device *adev);
+       void (*reset)(struct amdgpu_device *adev);
+       void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+       /* for power gating */
+       struct amdgpu_bo        *save_restore_obj;
+       uint64_t                save_restore_gpu_addr;
+       volatile uint32_t       *sr_ptr;
+       const u32               *reg_list;
+       u32                     reg_list_size;
+       /* for clear state */
+       struct amdgpu_bo        *clear_state_obj;
+       uint64_t                clear_state_gpu_addr;
+       volatile uint32_t       *cs_ptr;
+       const struct cs_section_def   *cs_data;
+       u32                     clear_state_size;
+       /* for cp tables */
+       struct amdgpu_bo        *cp_table_obj;
+       uint64_t                cp_table_gpu_addr;
+       volatile uint32_t       *cp_table_ptr;
+       u32                     cp_table_size;
+
+       /* safe mode for updating CG/PG state */
+       bool in_safe_mode;
+       const struct amdgpu_rlc_funcs *funcs;
+
+       /* for firmware data */
+       u32 save_and_restore_offset;
+       u32 clear_state_descriptor_offset;
+       u32 avail_scratch_ram_locations;
+       u32 reg_restore_list_size;
+       u32 reg_list_format_start;
+       u32 reg_list_format_separate_start;
+       u32 starting_offsets_start;
+       u32 reg_list_format_size_bytes;
+       u32 reg_list_size_bytes;
+       u32 reg_list_format_direct_reg_list_length;
+       u32 save_restore_list_cntl_size_bytes;
+       u32 save_restore_list_gpm_size_bytes;
+       u32 save_restore_list_srm_size_bytes;
+
+       u32 *register_list_format;
+       u32 *register_restore;
+       u8 *save_restore_list_cntl;
+       u8 *save_restore_list_gpm;
+       u8 *save_restore_list_srm;
+
+       bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
index bc9244b429ef11932b87ea96a191ad5af3e60d24..115bb0c99b0ff603ac0124280bf48e1a89f10d8e 100644 (file)
  * GPU SDMA IP block helpers function.
  */
 
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        int i;
 
        for (i = 0; i < adev->sdma.num_instances; i++)
-               if (&adev->sdma.instance[i].ring == ring)
-                       break;
+               if (ring == &adev->sdma.instance[i].ring ||
+                   ring == &adev->sdma.instance[i].page)
+                       return &adev->sdma.instance[i];
 
-       if (i < AMDGPU_MAX_SDMA_INSTANCES)
-               return &adev->sdma.instance[i];
-       else
-               return NULL;
+       return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (ring == &adev->sdma.instance[i].ring ||
+                       ring == &adev->sdma.instance[i].page) {
+                       *index = i;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
 }
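(The new index lookup also matches the page queue ring introduced in amdgpu_sdma.h below; a minimal sketch of a caller — the logging helper is hypothetical:)

static void example_log_sdma_index(struct amdgpu_ring *ring)
{
        uint32_t index;

        /* succeeds for both the main ring and the new page queue ring */
        if (!amdgpu_sdma_get_index_from_ring(ring, &index))
                DRM_DEBUG("%s is SDMA instance %u\n", ring->name, index);
}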
index 500113ec65caaf63a3ba6cc6d1f385b211bf34dc..16b1a6ae5ba6bcb23b42069ccb27954d116a614b 100644 (file)
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
        uint32_t                feature_version;
 
        struct amdgpu_ring      ring;
+       struct amdgpu_ring      page;
        bool                    burst_nop;
 };
 
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
        struct amdgpu_irq_src   illegal_inst_irq;
        int                     num_instances;
        uint32_t                    srbm_soft_reset;
+       bool                    has_page_queue;
 };
 
 /*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 
 struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
 
 #endif
index e9bf70e2ac5139152cf8e3596cb5713cbd0afd05..626abca770a0367b853000358b580fdc7c82ff22 100644 (file)
@@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
            TP_ARGS(vm, ring, job),
            TP_STRUCT__entry(
                             __field(u32, pasid)
+                            __string(ring, ring->name)
-                            __field(u32, ring)
                             __field(u32, vmid)
                             __field(u32, vm_hub)
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 
            TP_fast_assign(
                           __entry->pasid = vm->pasid;
-                          __entry->ring = ring->idx;
+                          __assign_str(ring, ring->name)
                           __entry->vmid = job->vmid;
                           __entry->vm_hub = ring->funcs->vmhub,
                           __entry->pd_addr = job->vm_pd_addr;
                           __entry->needs_flush = job->vm_needs_flush;
                           ),
-           TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-                     __entry->pasid, __entry->ring, __entry->vmid,
+           TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+                     __entry->pasid, __get_str(ring), __entry->vmid,
                      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
 
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush,
                     uint64_t pd_addr),
            TP_ARGS(ring, vmid, pd_addr),
            TP_STRUCT__entry(
-                            __field(u32, ring)
+                            __string(ring, ring->name)
                             __field(u32, vmid)
                             __field(u32, vm_hub)
                             __field(u64, pd_addr)
                             ),
 
            TP_fast_assign(
-                          __entry->ring = ring->idx;
+                          __assign_str(ring, ring->name)
                           __entry->vmid = vmid;
                           __entry->vm_hub = ring->funcs->vmhub;
                           __entry->pd_addr = pd_addr;
                           ),
-           TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-                     __entry->ring, __entry->vmid,
+           TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+                     __get_str(ring), __entry->vmid,
                      __entry->vm_hub,__entry->pd_addr)
 );
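
Both events now identify the ring by name instead of by a numeric index. For reference, a minimal sketch of the ftrace string plumbing relied on here, assuming only the standard TRACE_EVENT() machinery (the event name is made up): __string() reserves variable-length record space, __assign_str() copies the string at trace time, and __get_str() reads it back for TP_printk().

    TRACE_EVENT(example_ring_event,
                TP_PROTO(struct amdgpu_ring *ring),
                TP_ARGS(ring),
                TP_STRUCT__entry(
                                 __string(ring, ring->name) /* dynamic slot */
                                 ),
                TP_fast_assign(
                               __assign_str(ring, ring->name); /* copy name */
                               ),
                TP_printk("ring=%s", __get_str(ring))
    );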
 
index a44fc12ae1f9eba9eaa7460f536c00f008eaf481..c91ec3101d00b5d06e48db30c71b3cf938d95b63 100644 (file)
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       /* ensure reference is false in case init fails */
-       adev->mman.mem_global_referenced = false;
-
-       global_ref = &adev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &amdgpu_ttm_mem_global_init;
-       global_ref->release = &amdgpu_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               goto error_mem;
-       }
-
-       adev->mman.bo_global_ref.mem_glob =
-               adev->mman.mem_global_ref.object;
-       global_ref = &adev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               goto error_bo;
-       }
-
-       mutex_init(&adev->mman.gtt_window_lock);
-
-       adev->mman.mem_global_referenced = true;
-
-       return 0;
-
-error_bo:
-       drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
-       return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
-       if (adev->mman.mem_global_referenced) {
-               mutex_destroy(&adev->mman.gtt_window_lock);
-               drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&adev->mman.mem_global_ref);
-               adev->mman.mem_global_referenced = false;
-       }
-}
-
 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
        return 0;
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        int r;
        u64 vis_vram_limit;
 
-       /* initialize global references for vram/gtt */
-       r = amdgpu_ttm_global_init(adev);
-       if (r) {
-               return r;
-       }
+       mutex_init(&adev->mman.gtt_window_lock);
+
        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&adev->mman.bdev,
-                              adev->mman.bo_global_ref.ref.object,
                               &amdgpu_bo_driver,
                               adev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET,
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
        ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
        ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
-       amdgpu_ttm_global_fini(adev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
 }
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        unsigned i;
        int r;
 
-       if (direct_submit && !ring->ready) {
+       if (direct_submit && !ring->sched.ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
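
Taken together, the hunks above delete the whole drm_global/ttm_bo_global_ref dance: TTM is now expected to manage its global state internally. Assembled from the '+' lines, the driver-side bring-up reduces to the sketch below; the trailing argument is assumed from the pre-existing call, which this excerpt truncates:

    /* Post-patch TTM bring-up, assembled from the hunks above. */
    mutex_init(&adev->mman.gtt_window_lock);

    r = ttm_bo_device_init(&adev->mman.bdev,   /* no bo_global_ref anymore */
                           &amdgpu_bo_driver,
                           adev->ddev->anon_inode->i_mapping,
                           DRM_FILE_PAGE_OFFSET,
                           adev->need_dma32);  /* assumed final argument */
    if (r)
            return r;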
index fe8f276e9811c02e1bdee63c136e7bcf81afc876..b5b2d101f7db295bc36d7dd2493fbb95e0716d2e 100644 (file)
@@ -39,8 +39,6 @@
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS        2
 
 struct amdgpu_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
        struct ttm_bo_device            bdev;
        bool                            mem_global_referenced;
        bool                            initialized;
index aa6641b944a085de9f70638e9d68410995de6680..7ac25a1c78530b67f0c013b75470abb9dd338010 100644 (file)
@@ -57,6 +57,17 @@ struct psp_firmware_header_v1_0 {
        uint32_t sos_size_bytes;
 };
 
+/* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+       struct common_firmware_header header;
+       uint32_t ta_xgmi_ucode_version;
+       uint32_t ta_xgmi_offset_bytes;
+       uint32_t ta_xgmi_size_bytes;
+       uint32_t ta_ras_ucode_version;
+       uint32_t ta_ras_offset_bytes;
+       uint32_t ta_ras_size_bytes;
+};
+
 /* version_major=1, version_minor=0 */
 struct gfx_firmware_header_v1_0 {
        struct common_firmware_header header;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
        struct mc_firmware_header_v1_0 mc;
        struct smc_firmware_header_v1_0 smc;
        struct psp_firmware_header_v1_0 psp;
+       struct ta_firmware_header_v1_0 ta;
        struct gfx_firmware_header_v1_0 gfx;
        struct rlc_firmware_header_v1_0 rlc;
        struct rlc_firmware_header_v2_0 rlc_v2_0;
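
The new ta_firmware_header_v1_0 follows the same convention as the other versioned headers in this union: a common header plus per-blob version/offset/size triplets. A hedged sketch of how a consumer might slice the XGMI TA out of a loaded image; the fw->data layout is assumed from that convention, and the helper name is hypothetical:

    /* Hypothetical helper: locate the XGMI TA payload in a TA firmware image. */
    static const u8 *psp_locate_xgmi_ta(const struct firmware *fw,
                                        uint32_t *size, uint32_t *version)
    {
            const struct ta_firmware_header_v1_0 *hdr =
                    (const struct ta_firmware_header_v1_0 *)fw->data;

            *version = le32_to_cpu(hdr->ta_xgmi_ucode_version);
            *size = le32_to_cpu(hdr->ta_xgmi_size_bytes);

            /* payload sits at the stated offset from the start of the image */
            return fw->data + le32_to_cpu(hdr->ta_xgmi_offset_bytes);
    }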
index e5a6db6beab7acfa364fedac5d0ac538458a4570..69896f451e8a63da686fa1bbc8f84c2cfe1d7877 100644 (file)
@@ -1243,30 +1243,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence;
        long r;
-       uint32_t ip_instance = ring->me;
 
        r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+       if (r)
                goto error;
-       }
 
        r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
-       } else {
-               DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+       else if (r > 0)
                r = 0;
-       }
 
        dma_fence_put(fence);
 
index 5f3f540738187c6db03a7975bced71ea4163c9e0..98a1b2ce2b9d38cec778dc2c9a8097661eca27b0 100644 (file)
@@ -1032,8 +1032,10 @@ out:
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_job *job,
+                               struct amdgpu_ib *ib,
+                               bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
                return 0;
 
        r = amdgpu_ring_alloc(ring, 16);
-       if (r) {
-               DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring, VCE_CMD_END);
        amdgpu_ring_commit(ring);
 
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed\n",
-                         ring->idx);
+       if (i >= timeout)
                r = -ETIMEDOUT;
-       }
 
        return r;
 }
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                return 0;
 
        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-       } else {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       else if (r > 0)
                r = 0;
-       }
+
 error:
        dma_fence_put(fence);
        return r;
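
Every IB test in this patch now folds its result through the same three-way check instead of printing per-site errors. The convention being relied on is dma_fence_wait_timeout()'s return value: 0 means the timeout expired, negative is an error from the wait itself, positive is the remaining timeout on success. The normalization pattern, pulled out as a sketch (the wrapper name is illustrative):

    /* Fold the three-way dma_fence_wait_timeout() result into 0 / -errno,
     * as done at each call site above.
     */
    static long example_wait_ib_fence(struct dma_fence *fence, long timeout)
    {
            long r = dma_fence_wait_timeout(fence, false, timeout);

            if (r == 0)     /* timeout expired, fence never signaled */
                    return -ETIMEDOUT;
            if (r < 0)      /* error from the wait itself */
                    return r;
            return 0;       /* r > 0: signaled with time to spare */
    }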
index a1f209eed4c477498ee932a0f02e47fdff993137..50293652af148cc3c8fa2d04d8b7723e8b744e32 100644 (file)
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                               struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
index 27da13df2f113b72a959156f187c013252d3319e..e2e42e3fbcf3365c659fc6281626e0adc6cf40e0 100644 (file)
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
        return r;
 }
 
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-       } else {
-               DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
+       else if (r > 0)
                r = 0;
-       }
 
        dma_fence_put(fence);
-
 error:
        return r;
 }
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_ring_alloc(ring, 16);
-       if (r) {
-               DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed\n",
-                         ring->idx);
+       if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
-       }
 
        return r;
 }
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-       } else {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       else if (r > 0)
                r = 0;
-       }
+
 error:
        dma_fence_put(fence);
        return r;
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
 
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                                 ring->idx, r);
+       if (r)
                return r;
-       }
 
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                                 ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                                 ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
        return r;
 }
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r = 0;
 
        r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto error;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto error;
-       } else
+       } else {
                r = 0;
+       }
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout)
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-       else {
-               DRM_ERROR("ib test failed (0x%08X)\n", tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
        dma_fence_put(fence);
-
 error:
        return r;
 }
index f2f358aa059717194fd02fe41fbfc0c04bc0b26a..cfee74732edb8b8fe384db71b45100570e0649b6 100644 (file)
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-       uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-       addr -= AMDGPU_VA_RESERVED_SIZE;
-       addr = amdgpu_gmc_sign_extend(addr);
-
-       return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
        /* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
-       int r;
-       void *ptr;
-
-       r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-                               AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
-                               &adev->virt.csa_vmid0_addr, &ptr);
-       if (r)
-               return r;
-
-       memset(ptr, 0, AMDGPU_CSA_SIZE);
-       return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-       amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-                                               &adev->virt.csa_vmid0_addr,
-                                               NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                         struct amdgpu_bo_va **bo_va)
-{
-       uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
-       int r;
-
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
-       csa_tv.bo = &adev->virt.csa_obj->tbo;
-       csa_tv.shared = true;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-               return r;
-       }
-
-       *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-       if (!*bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
-               DRM_ERROR("failed to create bo_va for static CSA\n");
-               return -ENOMEM;
-       }
-
-       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-                               AMDGPU_CSA_SIZE);
-       if (r) {
-               DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, *bo_va);
-               ttm_eu_backoff_reservation(&ticket, &list);
-               return r;
-       }
-
-       r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
-                            AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-                            AMDGPU_PTE_EXECUTABLE);
-
-       if (r) {
-               DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, *bo_va);
-               ttm_eu_backoff_reservation(&ticket, &list);
-               return r;
-       }
-
-       ttm_eu_backoff_reservation(&ticket, &list);
-       return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
        /* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;
 
-       if (in_interrupt())
-               might_sleep();
-
+       might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;
 
-       if (in_interrupt())
-               might_sleep();
-
+       might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+                                       uint32_t reg0, uint32_t reg1,
+                                       uint32_t ref, uint32_t mask)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &kiq->ring;
+       signed long r, cnt = 0;
+       unsigned long flags;
+       uint32_t seq;
+
+       spin_lock_irqsave(&kiq->ring_lock, flags);
+       amdgpu_ring_alloc(ring, 32);
+       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+                                           ref, mask);
+       amdgpu_fence_emit_polling(ring, &seq);
+       amdgpu_ring_commit(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+       /* don't keep waiting when called from IRQ context */
+       if (r < 1 && in_interrupt())
+               goto failed_kiq;
+
+       might_sleep();
+       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+       }
+
+       if (cnt > MAX_KIQ_REG_TRY)
+               goto failed_kiq;
+
+       return;
+
+failed_kiq:
+       pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
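
The new helper turns a "write one register, poll another" sequence into a single KIQ submission, so an SR-IOV guest avoids two separately trapped MMIO accesses. A usage sketch; the caller and register names are hypothetical, and the assumed semantics (ref is written to reg0, then reg1 is polled against mask) follow from the emit_reg_write_reg_wait name rather than from code in this excerpt:

    /* Illustrative caller: kick an invalidation request and wait for its
     * ack through the KIQ. All names here are placeholders.
     */
    static void example_kiq_invalidate(struct amdgpu_device *adev,
                                       uint32_t req_reg, uint32_t ack_reg,
                                       uint32_t inv_req, unsigned int vmid)
    {
            amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
                                               inv_req, 1 << vmid);
    }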
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:      amdgpu device.
index 880ac113a3a9f38d64179dc4311a981939564e4f..0728fbc9a692fc8f29c6a8781f60dd0b12878ed4 100644 (file)
@@ -238,7 +238,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
 struct amdgpu_virt {
        uint32_t                        caps;
        struct amdgpu_bo                *csa_obj;
-       uint64_t                        csa_vmid0_addr;
        bool chained_ib_support;
        uint32_t                        reg_val_offs;
        struct amdgpu_irq_src           ack_irq;
@@ -251,8 +250,6 @@ struct amdgpu_virt {
        uint32_t gim_feature;
 };
 
-#define AMDGPU_CSA_SIZE                (8 * 1024)
-
 #define amdgpu_sriov_enabled(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
 
@@ -277,17 +274,13 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                         struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+                                       uint32_t reg0, uint32_t reg1,
+                                       uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
index d6c47972062afee121116c3f9995eeb8bfe3a3df..58a2363040ddca5cbfe2ff1891f1617fb47d073d 100644 (file)
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                        continue;
                }
 
-               /* First check if the entry is already handled */
-               if (cursor.pfn < frag_start) {
-                       cursor.entry->huge = true;
-                       amdgpu_vm_pt_next(adev, &cursor);
-                       continue;
-               }
-
                /* If it isn't already handled it can't be a huge page */
                if (cursor.entry->huge) {
                        /* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                        }
                } while (frag_start < entry_end);
 
-               if (frag >= shift)
+               if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+                       /* Mark all child entries as huge */
+                       while (cursor.pfn < frag_start) {
+                               cursor.entry->huge = true;
+                               amdgpu_vm_pt_next(adev, &cursor);
+                       }
+
+               } else if (frag >= shift) {
+                       /* or just move on to the next on the same level. */
                        amdgpu_vm_pt_next(adev, &cursor);
+               }
        }
 
        return 0;
index 897afbb348c1cb8db5e9164e16f019a36a3661b5..909216a9b447ba799e871bc4e5deec4f4f37b336 100644 (file)
@@ -63,7 +63,7 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
-       struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+       struct psp_xgmi_topology_info *tmp_topology;
        struct amdgpu_hive_info *hive;
        struct amdgpu_xgmi      *entry;
        struct amdgpu_device    *tmp_adev;
@@ -73,10 +73,12 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
        if ((adev->asic_type < CHIP_VEGA20) ||
                (adev->flags & AMD_IS_APU) )
                return 0;
-       adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
+       adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
        adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
 
-       memset(&tmp_topology[0], 0, sizeof(tmp_topology));
+       tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);
+       if (!tmp_topology)
+               return -ENOMEM;
        mutex_lock(&xgmi_mutex);
        hive = amdgpu_get_xgmi_hive(adev);
        if (!hive)
@@ -84,23 +86,28 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
        list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
        list_for_each_entry(entry, &hive->device_list, head)
-               tmp_topology[count++].device_id = entry->device_id;
-
-       ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
-       if (ret) {
-               dev_err(adev->dev,
-                       "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
-                       adev->gmc.xgmi.device_id,
-                       adev->gmc.xgmi.hive_id, ret);
-               goto exit;
+               tmp_topology->nodes[count++].node_id = entry->node_id;
+
+       /* Each PSP needs to get the latest topology */
+       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+               ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology);
+               if (ret) {
+                       dev_err(tmp_adev->dev,
+                               "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+                               tmp_adev->gmc.xgmi.node_id,
+                               tmp_adev->gmc.xgmi.hive_id, ret);
+                       /* TODO: continue if some nodes fail, or disable the whole hive */
+                       break;
+               }
        }
+
        /* Each PSP needs to set the latest topology */
        list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
                if (ret) {
                        dev_err(tmp_adev->dev,
                                "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
-                               tmp_adev->gmc.xgmi.device_id,
+                               tmp_adev->gmc.xgmi.node_id,
                                tmp_adev->gmc.xgmi.hive_id, ret);
                        /* TODO: continue if some nodes fail, or disable the whole hive */
                        break;
@@ -113,7 +120,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
 exit:
        mutex_unlock(&xgmi_mutex);
+       kfree(tmp_topology);
        return ret;
 }
-
-
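
The point of this hunk is stack usage: psp_xgmi_topology_info carries an entry per possible hive member, so an array of them in a kernel stack frame risks tripping the frame-size limit. The general shape of the fix, restated as a self-contained sketch (the function name is made up; the API is the one used above):

    /* Heap-allocate the topology buffer instead of placing it on the stack. */
    static int example_get_topology(struct amdgpu_device *adev)
    {
            struct psp_xgmi_topology_info *top;
            int ret;

            top = kzalloc(sizeof(*top), GFP_KERNEL);
            if (!top)
                    return -ENOMEM;

            ret = psp_xgmi_get_topology_info(&adev->psp, 1, top);

            kfree(top);     /* freed on every exit path, as in the hunk above */
            return ret;
    }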
index 79220a91abe3d48c32ef68c007efc07ee98b1638..86e14c754dd4755163f88f97b7b8a89079bc3fbe 100644 (file)
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
-                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                               amdgpu_gfx_rlc_exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                ci_do_enable_didt(adev, enable);
 
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
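
amdgpu_gfx_rlc_enter_safe_mode()/exit_safe_mode() themselves are not part of this excerpt; the call sites only make sense if they are thin wrappers that centralize the "already in safe mode" bookkeeping before dispatching to the per-ASIC funcs. A plausible reconstruction, offered as an assumption rather than the actual implementation:

    /* Assumed shape of the shared wrapper introduced by this series;
     * the callback name and details may differ in the real code.
     */
    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;                 /* don't enter twice */

            /* dispatch to the ASIC-specific implementation */
            adev->gfx.rlc.funcs->set_safe_mode(adev);
            adev->gfx.rlc.in_safe_mode = true;
    }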
index b918c8886b75c4104d2fc5b03c7863bf9a4d6e41..45795191de1ff9865153ba04f926aef38500cc22 100644 (file)
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 extra_bits = vmid & 0xf;
 
        /* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        cik_sdma_enable(adev, true);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
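
amdgpu_ring_test_helper() is likewise not shown in this excerpt. From the way every converted call site drops both its "ring->ready = true" priming and its "ring->ready = false" fallback, it evidently runs the ring test and records the outcome in ring->sched.ready itself. A reconstruction under that assumption:

    /* Assumed helper: run the HW ring test and fold the result into the
     * scheduler's ready flag, replacing the open-coded pattern removed above.
     */
    int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
    {
            int r = amdgpu_ring_test_ring(ring);

            if (r)
                    DRM_DEV_ERROR(ring->adev->dev,
                                  "ring %s test failed (%d)\n",
                                  ring->name, r);

            ring->sched.ready = !r;         /* single source of truth now */
            return r;
    }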
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
        r = amdgpu_ring_alloc(ring, 5);
-       if (r) {
-               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_device_wb_free(adev, index);
-               return r;
-       }
+       if (r)
+               goto error_free_wb;
+
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
-       amdgpu_device_wb_free(adev, index);
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
+error_free_wb:
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
 
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err0;
-       }
 
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                                SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+       else
                r = -EINVAL;
-       }
 
 err1:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;
 
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             struct amdgpu_iv_entry *entry)
 {
+       u8 instance_id;
+
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
-       schedule_work(&adev->reset_work);
+       instance_id = (entry->ring_id & 0x3) >> 0;
+       drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
        return 0;
 }
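
drm_sched_fault() is the scheduler-side replacement for the old schedule_work(&adev->reset_work) pattern used throughout this patch: rather than kicking a device-global reset, the interrupt handler singles out the scheduler whose job faulted and lets that scheduler's timeout machinery drive recovery. The ring_id decoding is taken from the hunk above; the handler name below is hypothetical:

    /* Hypothetical fault handler following the pattern above. */
    static int example_sdma_illegal_inst_irq(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             struct amdgpu_iv_entry *entry)
    {
            u8 instance = entry->ring_id & 0x3;     /* which SDMA engine */

            DRM_ERROR("Illegal instruction in SDMA command stream\n");
            /* wake only the affected scheduler's timeout handling */
            drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
            return 0;
    }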
 
index d76eb27945dc897230640e1727cc2b8def35f1cb..1dc3013ea1d5401120bb9f87393ca7875903d676 100644 (file)
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
 
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_gfx_scratch_free(adev, scratch);
-               return r;
-       }
+       if (r)
+               goto error_free_scratch;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
                        break;
                DRM_UDELAY(1);
        }
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-                         ring->idx, scratch, tmp);
-               r = -EINVAL;
-       }
+
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
+error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
 }
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err1;
-       }
+
        ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
        ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
        ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
        tmp = RREG32(scratch);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+       else
                r = -EINVAL;
-       }
 
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                                      CP_ME_CNTL__CE_HALT_MASK));
                WREG32(mmSCRATCH_UMSK, 0);
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the rings */
        gfx_v6_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        return 0;
 }
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
        WREG32(mmCP_RB2_CNTL, tmp);
        WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-       adev->gfx.compute_ring[0].ready = false;
-       adev->gfx.compute_ring[1].ready = false;
 
        for (i = 0; i < 2; i++) {
-               r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+               r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
                if (r)
                        return r;
-               adev->gfx.compute_ring[i].ready = true;
        }
 
        return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
        const u32 *src_ptr;
        volatile u32 *dst_ptr;
-       u32 dws, i;
+       u32 dws;
        u64 reg_list_mc_addr;
        const struct cs_section_def *cs_data;
        int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (src_ptr) {
-               /* save restore block */
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.save_restore_obj,
-                                             &adev->gfx.rlc.save_restore_gpu_addr,
-                                             (void **)&adev->gfx.rlc.sr_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-                                r);
-                       gfx_v6_0_rlc_fini(adev);
+               /* init save restore block */
+               r = amdgpu_gfx_rlc_init_sr(adev, dws);
+               if (r)
                        return r;
-               }
-
-               /* write the sr buffer */
-               dst_ptr = adev->gfx.rlc.sr_ptr;
-               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
        }
 
        if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
                                              (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
                        dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       gfx_v6_0_rlc_fini(adev);
+                       amdgpu_gfx_rlc_fini(adev);
                        return r;
                }
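
As with the safe-mode wrappers, amdgpu_gfx_rlc_init_sr() is introduced elsewhere in this series. Judging from the block it replaces here, it should allocate the save/restore BO and copy the register list into it. A hedged reconstruction assuming the pre-existing rlc fields:

    /* Assumed shape of the shared helper replacing the removed block. */
    int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
    {
            const u32 *src_ptr = adev->gfx.rlc.reg_list;
            volatile u32 *dst_ptr;
            u32 i;
            int r;

            r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->gfx.rlc.save_restore_obj,
                                          &adev->gfx.rlc.save_restore_gpu_addr,
                                          (void **)&adev->gfx.rlc.sr_ptr);
            if (r) {
                    dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
                    amdgpu_gfx_rlc_fini(adev);
                    return r;
            }

            /* write the save/restore register list into the new BO */
            dst_ptr = adev->gfx.rlc.sr_ptr;
            for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
                    dst_ptr[i] = cpu_to_le32(src_ptr[i]);

            amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
            amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
            return 0;
    }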
 
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
        if (!adev->gfx.rlc_fw)
                return -EINVAL;
 
-       gfx_v6_0_rlc_stop(adev);
-       gfx_v6_0_rlc_reset(adev);
+       adev->gfx.rlc.funcs->stop(adev);
+       adev->gfx.rlc.funcs->reset(adev);
        gfx_v6_0_init_pg(adev);
        gfx_v6_0_init_cg(adev);
 
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
        WREG32(mmRLC_UCODE_ADDR, 0);
 
        gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
-       gfx_v6_0_rlc_start(adev);
+       adev->gfx.rlc.funcs->start(adev);
 
        return 0;
 }
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
        .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
 };
 
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+       .init = gfx_v6_0_rlc_init,
+       .resume = gfx_v6_0_rlc_resume,
+       .stop = gfx_v6_0_rlc_stop,
+       .reset = gfx_v6_0_rlc_reset,
+       .start = gfx_v6_0_rlc_start
+};
+
 static int gfx_v6_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
        adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
        adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+       adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
        gfx_v6_0_set_ring_funcs(adev);
        gfx_v6_0_set_irq_funcs(adev);
 
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
                return r;
        }
 
-       r = gfx_v6_0_rlc_init(adev);
+       r = adev->gfx.rlc.funcs->init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-       gfx_v6_0_rlc_fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
 
        return 0;
 }
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
 
        gfx_v6_0_constants_init(adev);
 
-       r = gfx_v6_0_rlc_resume(adev);
+       r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
 
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gfx_v6_0_cp_enable(adev, false);
-       gfx_v6_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
        gfx_v6_0_fini_pg(adev);
 
        return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+                          struct amdgpu_iv_entry *entry)
+{
+       struct amdgpu_ring *ring;
+
+       switch (entry->ring_id) {
+       case 0:
+               ring = &adev->gfx.gfx_ring[0];
+               break;
+       case 1:
+       case 2:
+               ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+               break;
+       default:
+               return;
+       }
+       drm_sched_fault(&ring->sched);
+}
+
 static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal register access in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v6_0_fault(adev, entry);
        return 0;
 }
 
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal instruction in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v6_0_fault(adev, entry);
        return 0;
 }
 
index 0e72bc09939aca1415320b027d9f57380e6eebc4..f467b9bd090d49b6cba62864f83317d7310f44bd 100644 (file)
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
 
 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_gfx_scratch_free(adev, scratch);
-               return r;
-       }
+       if (r)
+               goto error_free_scratch;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
                        break;
                DRM_UDELAY(1);
        }
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-                         ring->idx, scratch, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
+error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
 }
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                     struct amdgpu_ib *ib,
-                                     unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                         struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
-                                         unsigned vmid, bool ctx_switch)
+                                         bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err1;
-       }
+
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
        ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
        tmp = RREG32(scratch);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+       else
                r = -EINVAL;
-       }
 
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the ring */
        gfx_v7_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        return 0;
 }
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
                * GFX7_MEC_HPD_SIZE * 2;
 
        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               amdgpu_ring_test_helper(ring);
        }
 
        return 0;
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
  * The RLC is a multi-purpose microengine that handles a
  * variety of functions.
  */
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 {
        const u32 *src_ptr;
-       volatile u32 *dst_ptr;
-       u32 dws, i;
+       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (src_ptr) {
-               /* save restore block */
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.save_restore_obj,
-                                             &adev->gfx.rlc.save_restore_gpu_addr,
-                                             (void **)&adev->gfx.rlc.sr_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-                       gfx_v7_0_rlc_fini(adev);
+               /* init save restore block */
+               r = amdgpu_gfx_rlc_init_sr(adev, dws);
+               if (r)
                        return r;
-               }
-
-               /* write the sr buffer */
-               dst_ptr = adev->gfx.rlc.sr_ptr;
-               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
        }
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       gfx_v7_0_rlc_fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v7_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if (adev->gfx.rlc.cp_table_size) {
-
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-                       gfx_v7_0_rlc_fini(adev);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               gfx_v7_0_init_cp_pg_table(adev);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
        }
 
        return 0;
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
        return orig;
 }
 
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+       return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp, i, mask;
 
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
        }
 }
 
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        adev->gfx.rlc_feature_version = le32_to_cpu(
                                        hdr->ucode_feature_version);
 
-       gfx_v7_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
 
        /* disable CG */
        tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
        WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 
-       gfx_v7_0_rlc_reset(adev);
+       adev->gfx.rlc.funcs->reset(adev);
 
        gfx_v7_0_init_pg(adev);
 
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_BONAIRE)
                WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
 
-       gfx_v7_0_rlc_start(adev);
+       adev->gfx.rlc.funcs->start(adev);
 
        return 0;
 }
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
                WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
 {
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 4;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
        if (adev->asic_type == CHIP_KAVERI)
-               max_me = 5;
-
-       if (adev->gfx.rlc.cp_table_ptr == NULL)
-               return;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
+               return 5;
+       else
+               return 4;
 }
 
 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
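
[annotation] The near-identical init_cp_jump_table copies in gfx v7/v8/v9 collapse into common code that only asks the ASIC how many micro engines to copy, via the new get_cp_table_num hook. A sketch of how the shared writer can fold the five per-ME branches into a table (presumably amdgpu_gfx_rlc_setup_cp_table() in amdgpu_rlc.c; names assumed from the hunks above):

    static void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
    {
            /* firmware blobs in jump-table order: CE, PFP, ME, MEC, MEC2 */
            const struct firmware *fws[] = {
                    adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
                    adev->gfx.mec_fw, adev->gfx.mec2_fw
            };
            volatile u32 *dst_ptr = adev->gfx.rlc.cp_table_ptr;
            int me, i, max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
            u32 bo_offset = 0;

            for (me = 0; me < max_me; me++) {
                    const struct gfx_firmware_header_v1_0 *hdr =
                            (const struct gfx_firmware_header_v1_0 *)fws[me]->data;
                    const __le32 *fw_data = (const __le32 *)(fws[me]->data +
                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                    u32 table_offset = le32_to_cpu(hdr->jt_offset);
                    u32 table_size = le32_to_cpu(hdr->jt_size);

                    /* copy this engine's jump table into the shared BO */
                    for (i = 0; i < table_size; i++)
                            dst_ptr[bo_offset + i] = cpu_to_le32(
                                    le32_to_cpu(fw_data[table_offset + i]));
                    bo_offset += table_size;
            }
    }
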
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
-       .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
-       .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+       .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v7_0_set_safe_mode,
+       .unset_safe_mode = gfx_v7_0_unset_safe_mode,
+       .init = gfx_v7_0_rlc_init,
+       .get_csb_size = gfx_v7_0_get_csb_size,
+       .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+       .resume = gfx_v7_0_rlc_resume,
+       .stop = gfx_v7_0_rlc_stop,
+       .reset = gfx_v7_0_rlc_reset,
+       .start = gfx_v7_0_rlc_start
 };
 
 static int gfx_v7_0_early_init(void *handle)
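
[annotation] With the enter/exit pair replaced by is_rlc_enabled/set_safe_mode/unset_safe_mode hooks, the cg_flags gating and in_safe_mode bookkeeping that each ASIC used to duplicate can live in one common wrapper. A sketch of the expected amdgpu_rlc.c side, hedged as illustrative:

    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;

            /* nothing to do if the RLC isn't running */
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
                 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
                    adev->gfx.rlc.funcs->set_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = true;
            }
    }

amdgpu_gfx_rlc_exit_safe_mode() would mirror this, calling unset_safe_mode and clearing the flag, which is why the per-ASIC hooks below can drop their own cg_flags and in_safe_mode checks.
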
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle)
                return r;
        }
 
-       r = gfx_v7_0_rlc_init(adev);
+       r = adev->gfx.rlc.funcs->init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
        gfx_v7_0_cp_compute_fini(adev);
-       gfx_v7_0_rlc_fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                                &adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle)
        gfx_v7_0_constants_init(adev);
 
        /* init rlc */
-       r = gfx_v7_0_rlc_resume(adev);
+       r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
 
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle)
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        gfx_v7_0_cp_enable(adev, false);
-       gfx_v7_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
        gfx_v7_0_fini_pg(adev);
 
        return 0;
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle)
                gfx_v7_0_update_cg(adev, false);
 
                /* stop the rlc */
-               gfx_v7_0_rlc_stop(adev);
+               adev->gfx.rlc.funcs->stop(adev);
 
                /* Disable GFX parsing/prefetching */
                WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+                          struct amdgpu_iv_entry *entry)
+{
+       struct amdgpu_ring *ring;
+       u8 me_id, pipe_id;
+       int i;
+
+       me_id = (entry->ring_id & 0x0c) >> 2;
+       pipe_id = (entry->ring_id & 0x03) >> 0;
+       switch (me_id) {
+       case 0:
+               drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+               break;
+       case 1:
+       case 2:
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       ring = &adev->gfx.compute_ring[i];
+                       if ((ring->me == me_id) && (ring->pipe == pipe_id))
+                               drm_sched_fault(&ring->sched);
+               }
+               break;
+       }
+}
+
 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal register access in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v7_0_fault(adev, entry);
        return 0;
 }
 
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
 {
        DRM_ERROR("Illegal instruction in command stream\n");
        // XXX soft reset the gfx block only
-       schedule_work(&adev->reset_work);
+       gfx_v7_0_fault(adev, entry);
        return 0;
 }
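
[annotation] Note the behavioral change in the two IRQ handlers above: instead of kicking the device-global reset_work, the fault is attributed to the ring that raised it and handed to the GPU scheduler's per-ring timeout path. The scheduler side of drm_sched_fault() is tiny; roughly, it just fires the ring's TDR work immediately:

    void drm_sched_fault(struct drm_gpu_scheduler *sched)
    {
            /* run the timeout handler now instead of waiting out the TDR period */
            mod_delayed_work(system_wq, &sched->work_tdr, 0);
    }
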
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8908a375aa0d132af1868f3eaf9e2067b1..cb066a8dccd7f3f2f552cdaa091bdadeba9f2179 100644 (file)
@@ -54,7 +54,7 @@
 #include "ivsrcid/ivsrcid_vislands30.h"
 
 #define GFX8_NUM_GFX_RINGS     1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
 
 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
@@ -839,18 +839,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
-               amdgpu_gfx_scratch_free(adev, scratch);
-               return r;
-       }
+       if (r)
+               goto error_free_scratch;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +858,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
                        break;
                DRM_UDELAY(1);
        }
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-                         ring->idx, scratch, tmp);
-               r = -EINVAL;
-       }
+
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
+error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
 }
@@ -886,19 +879,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err1;
-       }
+
        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +902,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
 
        tmp = adev->wb.wb[index];
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+       else
                r = -EINVAL;
-       }
 
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
        buffer[count++] = cpu_to_le32(0);
 }
 
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 4;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
        if (adev->asic_type == CHIP_CARRIZO)
-               max_me = 5;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else  if (me == 4) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+               return 5;
+       else
+               return 4;
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
-       volatile u32 *dst_ptr;
-       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -1381,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       gfx_v8_0_rlc_fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if ((adev->asic_type == CHIP_CARRIZO) ||
            (adev->asic_type == CHIP_STONEY)) {
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               cz_init_cp_jump_table(adev);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
        }
 
        return 0;
@@ -1443,7 +1337,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
        mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
 
        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
@@ -1629,7 +1523,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
                return 0;
 
        /* bail if the compute ring is not ready */
-       if (!ring->ready)
+       if (!ring->sched.ready)
                return 0;
 
        tmp = RREG32(mmGB_EDC_MODE);
@@ -2088,7 +1982,7 @@ static int gfx_v8_0_sw_init(void *handle)
                return r;
        }
 
-       r = gfx_v8_0_rlc_init(adev);
+       r = adev->gfx.rlc.funcs->init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
@@ -2181,7 +2075,7 @@ static int gfx_v8_0_sw_fini(void *handle)
        amdgpu_gfx_kiq_fini(adev);
 
        gfx_v8_0_mec_fini(adev);
-       gfx_v8_0_rlc_fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                                &adev->gfx.rlc.clear_state_gpu_addr,
                                (void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4069,10 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
-       gfx_v8_0_rlc_stop(adev);
-       gfx_v8_0_rlc_reset(adev);
+       adev->gfx.rlc.funcs->stop(adev);
+       adev->gfx.rlc.funcs->reset(adev);
        gfx_v8_0_init_pg(adev);
-       gfx_v8_0_rlc_start(adev);
+       adev->gfx.rlc.funcs->start(adev);
 
        return 0;
 }
@@ -4197,7 +4091,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32(mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -4379,10 +4273,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
        /* start the ring */
        amdgpu_ring_clear_ring(ring);
        gfx_v8_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r)
-               ring->ready = false;
+       ring->sched.ready = true;
+       r = amdgpu_ring_test_helper(ring);
 
        return r;
 }
@@ -4396,8 +4288,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
-               adev->gfx.kiq.ring.ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
+               adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
 }
@@ -4473,11 +4365,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
        }
 
-       r = amdgpu_ring_test_ring(kiq_ring);
-       if (r) {
+       r = amdgpu_ring_test_helper(kiq_ring);
+       if (r)
                DRM_ERROR("KCQ enable failed\n");
-               kiq_ring->ready = false;
-       }
        return r;
 }
 
@@ -4781,7 +4671,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
        amdgpu_bo_kunmap(ring->mqd_obj);
        ring->mqd_ptr = NULL;
        amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->ready = true;
+       ring->sched.ready = true;
        return 0;
 }
 
@@ -4820,10 +4710,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
         */
        for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
                ring = &adev->gfx.compute_ring[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
        }
 
 done:
@@ -4867,7 +4754,7 @@ static int gfx_v8_0_hw_init(void *handle)
        gfx_v8_0_init_golden_registers(adev);
        gfx_v8_0_constants_init(adev);
 
-       r = gfx_v8_0_rlc_resume(adev);
+       r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
 
@@ -4899,7 +4786,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
-       r = amdgpu_ring_test_ring(kiq_ring);
+       r = amdgpu_ring_test_helper(kiq_ring);
        if (r)
                DRM_ERROR("KCQ disable failed\n");
 
@@ -4973,16 +4860,16 @@ static int gfx_v8_0_hw_fini(void *handle)
                pr_debug("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
        if (!gfx_v8_0_wait_for_idle(adev))
                gfx_v8_0_cp_enable(adev, false);
        else
                pr_err("cp is busy, skip halt cp\n");
        if (!gfx_v8_0_wait_for_rlc_idle(adev))
-               gfx_v8_0_rlc_stop(adev);
+               adev->gfx.rlc.funcs->stop(adev);
        else
                pr_err("rlc is busy, skip halt rlc\n");
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return 0;
 }
 
@@ -5071,7 +4958,7 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
        srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
        /* stop the rlc */
-       gfx_v8_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5197,7 +5084,7 @@ static int gfx_v8_0_post_soft_reset(void *handle)
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
                gfx_v8_0_cp_gfx_resume(adev);
 
-       gfx_v8_0_rlc_start(adev);
+       adev->gfx.rlc.funcs->start(adev);
 
        return 0;
 }
@@ -5445,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                AMD_PG_SUPPORT_RLC_SMU_HS |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_GFX_DMG))
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
@@ -5499,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                AMD_PG_SUPPORT_RLC_SMU_HS |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_GFX_DMG))
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        return 0;
 }
 
@@ -5593,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-       u32 data;
-       unsigned i;
+       uint32_t rlc_setting;
 
-       data = RREG32(mmRLC_CNTL);
-       if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
+       rlc_setting = RREG32(mmRLC_CNTL);
+       if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+               return false;
 
-       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               data |= RLC_SAFE_MODE__CMD_MASK;
-               data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-               data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-               WREG32(mmRLC_SAFE_MODE, data);
+       return true;
+}
 
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if ((RREG32(mmRLC_GPM_STAT) &
-                            (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-                             RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
-                           (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-                            RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
-                               break;
-                       udelay(1);
-               }
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+       uint32_t data;
+       unsigned i;
+       data = RREG32(mmRLC_CNTL);
+       data |= RLC_SAFE_MODE__CMD_MASK;
+       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+       data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+       WREG32(mmRLC_SAFE_MODE, data);
 
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-                               break;
-                       udelay(1);
-               }
-               adev->gfx.rlc.in_safe_mode = true;
+       /* wait for RLC_SAFE_MODE */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if ((RREG32(mmRLC_GPM_STAT) &
+                    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+                     RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+                   (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+                    RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+                       break;
+               udelay(1);
+       }
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+                       break;
+               udelay(1);
        }
 }
 
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-       u32 data = 0;
+       uint32_t data;
        unsigned i;
 
        data = RREG32(mmRLC_CNTL);
-       if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
-
-       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               if (adev->gfx.rlc.in_safe_mode) {
-                       data |= RLC_SAFE_MODE__CMD_MASK;
-                       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-                       WREG32(mmRLC_SAFE_MODE, data);
-                       adev->gfx.rlc.in_safe_mode = false;
-               }
-       }
+       data |= RLC_SAFE_MODE__CMD_MASK;
+       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+       WREG32(mmRLC_SAFE_MODE, data);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5536,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-       .enter_safe_mode = iceland_enter_rlc_safe_mode,
-       .exit_safe_mode = iceland_exit_rlc_safe_mode
+       .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v8_0_set_safe_mode,
+       .unset_safe_mode = gfx_v8_0_unset_safe_mode,
+       .init = gfx_v8_0_rlc_init,
+       .get_csb_size = gfx_v8_0_get_csb_size,
+       .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+       .resume = gfx_v8_0_rlc_resume,
+       .stop = gfx_v8_0_rlc_stop,
+       .reset = gfx_v8_0_rlc_reset,
+       .start = gfx_v8_0_rlc_start
 };
 
 static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
        uint32_t temp, data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        /* It is disabled by HW by default */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
                gfx_v8_0_wait_for_rlc_serdes(adev);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
        temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
                temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
        gfx_v8_0_wait_for_rlc_serdes(adev);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
@@ -6131,9 +6023,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                     struct amdgpu_ib *ib,
-                                     unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,9 +6055,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                         struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
-                                         unsigned vmid, bool ctx_switch)
+                                         bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
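
[annotation] The emit_ib callbacks now receive the owning amdgpu_job so per-job state (today just the VMID) comes from one place; direct IB tests pass a NULL job. AMDGPU_JOB_GET_VMID presumably keeps the obvious NULL-safe form:

    #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
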
@@ -6738,12 +6634,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+                          struct amdgpu_iv_entry *entry)
+{
+       u8 me_id, pipe_id, queue_id;
+       struct amdgpu_ring *ring;
+       int i;
+
+       me_id = (entry->ring_id & 0x0c) >> 2;
+       pipe_id = (entry->ring_id & 0x03) >> 0;
+       queue_id = (entry->ring_id & 0x70) >> 4;
+
+       switch (me_id) {
+       case 0:
+               drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+               break;
+       case 1:
+       case 2:
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       ring = &adev->gfx.compute_ring[i];
+                       if (ring->me == me_id && ring->pipe == pipe_id &&
+                           ring->queue == queue_id)
+                               drm_sched_fault(&ring->sched);
+               }
+               break;
+       }
+}
+
 static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal register access in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v8_0_fault(adev, entry);
        return 0;
 }
 
@@ -6752,7 +6675,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal instruction in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v8_0_fault(adev, entry);
        return 0;
 }
 
@@ -6976,10 +6899,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
                17 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
-       .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
        .test_ring = gfx_v8_0_ring_test_ring,
-       .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v8_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf59d6e11e947c83ef34d716c5a546d6460f..c27caa144c576089eb6ea795009bea4b5bf966a7 100644 (file)
@@ -41,7 +41,7 @@
 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
 
 #define GFX9_NUM_GFX_RINGS     1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
 
@@ -396,18 +396,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+       if (r)
                return r;
-       }
+
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
-               amdgpu_gfx_scratch_free(adev, scratch);
-               return r;
-       }
+       if (r)
+               goto error_free_scratch;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +415,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
                        break;
                DRM_UDELAY(1);
        }
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-                         ring->idx, scratch, tmp);
-               r = -EINVAL;
-       }
+
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
+error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
 }
@@ -443,19 +436,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err1;
-       }
+
        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +459,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-                       DRM_ERROR("amdgpu: IB test timed out.\n");
-                       r = -ETIMEDOUT;
-                       goto err2;
+               r = -ETIMEDOUT;
+               goto err2;
        } else if (r < 0) {
-                       DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-                       goto err2;
+               goto err2;
        }
 
        tmp = adev->wb.wb[index];
-       if (tmp == 0xDEADBEEF) {
-                       DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-                       r = 0;
-       } else {
-                       DRM_ERROR("ib test on ring %d failed\n", ring->idx);
-                       r = -EINVAL;
-       }
+       if (tmp == 0xDEADBEEF)
+               r = 0;
+       else
+               r = -EINVAL;
 
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -1065,85 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
 }
 
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
-{
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 5;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else  if (me == 4) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-       /* clear state block */
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
-                       &adev->gfx.rlc.clear_state_gpu_addr,
-                       (void **)&adev->gfx.rlc.cs_ptr);
-
-       /* jump table block */
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
-                       &adev->gfx.rlc.cp_table_gpu_addr,
-                       (void **)&adev->gfx.rlc.cp_table_ptr);
+       return 5;
 }
 
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
-       volatile u32 *dst_ptr;
-       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -1152,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
-                               r);
-                       gfx_v9_0_rlc_fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v9_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if (adev->asic_type == CHIP_RAVEN) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_err(adev->dev,
-                               "(%d) failed to create cp table bo\n", r);
-                       gfx_v9_0_rlc_fini(adev);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               rv_init_cp_jump_table(adev);
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
        }
 
        switch (adev->asic_type) {
@@ -1264,7 +1150,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
        mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
 
        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
@@ -1635,8 +1521,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
        /* Clear GDS reserved memory */
        r = amdgpu_ring_alloc(ring, 17);
        if (r) {
-               DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+               DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+                         ring->name, r);
                return r;
        }
 
@@ -1748,7 +1634,7 @@ static int gfx_v9_0_sw_init(void *handle)
                return r;
        }
 
-       r = gfx_v9_0_rlc_init(adev);
+       r = adev->gfx.rlc.funcs->init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
@@ -2498,12 +2384,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
                return 0;
        }
 
-       gfx_v9_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
 
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       gfx_v9_0_rlc_reset(adev);
+       adev->gfx.rlc.funcs->reset(adev);
 
        gfx_v9_0_init_pg(adev);
 
@@ -2514,15 +2400,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
                        return r;
        }
 
-       if (adev->asic_type == CHIP_RAVEN ||
-           adev->asic_type == CHIP_VEGA20) {
-               if (amdgpu_lbpw != 0)
+       switch (adev->asic_type) {
+       case CHIP_RAVEN:
+               if (amdgpu_lbpw == 0)
+                       gfx_v9_0_enable_lbpw(adev, false);
+               else
+                       gfx_v9_0_enable_lbpw(adev, true);
+               break;
+       case CHIP_VEGA20:
+               if (amdgpu_lbpw > 0)
                        gfx_v9_0_enable_lbpw(adev, true);
                else
                        gfx_v9_0_enable_lbpw(adev, false);
+               break;
+       default:
+               break;
        }
 
-       gfx_v9_0_rlc_start(adev);
+       adev->gfx.rlc.funcs->start(adev);
 
        return 0;
 }
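
[annotation] Splitting the RAVEN/VEGA20 test makes the default policy visible: amdgpu_lbpw is a tri-state module parameter, so with the auto default Raven keeps load balancing per watt enabled (auto != 0) while Vega20 leaves it disabled (auto is not > 0). Assuming the usual declaration in amdgpu_drv.c (permissions and wording are an assumption):

    /* -1 = auto (default), 0 = disable, 1 = enable */
    int amdgpu_lbpw = -1;
    module_param_named(lbpw, amdgpu_lbpw, int, 0444);
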
@@ -2537,7 +2432,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
        if (!enable) {
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -2727,7 +2622,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the ring */
        gfx_v9_0_cp_gfx_start(adev);
-       ring->ready = true;
+       ring->sched.ready = true;
 
        return 0;
 }
@@ -2742,8 +2637,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
-               adev->gfx.kiq.ring.ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
+               adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
 }
@@ -2866,11 +2761,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
        }
 
-       r = amdgpu_ring_test_ring(kiq_ring);
-       if (r) {
+       r = amdgpu_ring_test_helper(kiq_ring);
+       if (r)
                DRM_ERROR("KCQ enable failed\n");
-               kiq_ring->ready = false;
-       }
 
        return r;
 }
@@ -3249,7 +3142,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
        amdgpu_bo_kunmap(ring->mqd_obj);
        ring->mqd_ptr = NULL;
        amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->ready = true;
+       ring->sched.ready = true;
        return 0;
 }
 
@@ -3314,19 +3207,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
                return r;
 
        ring = &adev->gfx.gfx_ring[0];
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
-
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               amdgpu_ring_test_helper(ring);
        }
 
        gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3353,7 +3240,7 @@ static int gfx_v9_0_hw_init(void *handle)
        if (r)
                return r;
 
-       r = gfx_v9_0_rlc_resume(adev);
+       r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
 
@@ -3391,7 +3278,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
-       r = amdgpu_ring_test_ring(kiq_ring);
+       r = amdgpu_ring_test_helper(kiq_ring);
        if (r)
                DRM_ERROR("KCQ disable failed\n");
 
@@ -3433,7 +3320,7 @@ static int gfx_v9_0_hw_fini(void *handle)
        }
 
        gfx_v9_0_cp_enable(adev, false);
-       gfx_v9_0_rlc_stop(adev);
+       adev->gfx.rlc.funcs->stop(adev);
 
        gfx_v9_0_csb_vram_unpin(adev);
 
@@ -3508,7 +3395,7 @@ static int gfx_v9_0_soft_reset(void *handle)
 
        if (grbm_soft_reset) {
                /* stop the rlc */
-               gfx_v9_0_rlc_stop(adev);
+               adev->gfx.rlc.funcs->stop(adev);
 
                /* Disable GFX parsing/prefetching */
                gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3607,64 +3494,47 @@ static int gfx_v9_0_late_init(void *handle)
        return 0;
 }
 
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-       uint32_t rlc_setting, data;
-       unsigned i;
-
-       if (adev->gfx.rlc.in_safe_mode)
-               return;
+       uint32_t rlc_setting;
 
        /* if RLC is not enabled, do nothing */
        rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
        if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
-
-       if (adev->cg_flags &
-           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
-            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
-               data = RLC_SAFE_MODE__CMD_MASK;
-               data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-               WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+               return false;
 
-               /* wait for RLC_SAFE_MODE */
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-                               break;
-                       udelay(1);
-               }
-               adev->gfx.rlc.in_safe_mode = true;
-       }
+       return true;
 }
 
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
 {
-       uint32_t rlc_setting, data;
-
-       if (!adev->gfx.rlc.in_safe_mode)
-               return;
+       uint32_t data;
+       unsigned i;
 
-       /* if RLC is not enabled, do nothing */
-       rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-       if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
+       data = RLC_SAFE_MODE__CMD_MASK;
+       data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+       WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
 
-       if (adev->cg_flags &
-           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               /*
-                * Try to exit safe mode only if it is already in safe
-                * mode.
-                */
-               data = RLC_SAFE_MODE__CMD_MASK;
-               WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
-               adev->gfx.rlc.in_safe_mode = false;
+       /* wait for RLC_SAFE_MODE */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+                       break;
+               udelay(1);
        }
 }
 
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+{
+       uint32_t data;
+
+       data = RLC_SAFE_MODE__CMD_MASK;
+       WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+}
+
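Annotation: with enter/exit split into is_rlc_enabled/set_safe_mode/unset_safe_mode, the in_safe_mode bookkeeping moves to a common wrapper. Reconstructing it from the code removed above (the authoritative version lives in amdgpu_rlc.c, introduced earlier in this series):

	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		/* if RLC is not enabled, do nothing */
		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		if (adev->cg_flags &
		    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
		     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
			adev->gfx.rlc.funcs->set_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = true;
		}
	}

amdgpu_gfx_rlc_exit_safe_mode() would mirror this with unset_safe_mode().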
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
 {
-       gfx_v9_0_enter_rlc_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
                gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3675,7 +3545,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
        }
 
-       gfx_v9_0_exit_rlc_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3773,7 +3643,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 {
        uint32_t data, def;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        /* Enable 3D CGCG/CGLS */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3813,7 +3683,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3821,7 +3691,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 {
        uint32_t def, data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3861,7 +3731,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3890,8 +3760,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
-       .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
-       .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+       .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v9_0_set_safe_mode,
+       .unset_safe_mode = gfx_v9_0_unset_safe_mode,
+       .init = gfx_v9_0_rlc_init,
+       .get_csb_size = gfx_v9_0_get_csb_size,
+       .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+       .resume = gfx_v9_0_rlc_resume,
+       .stop = gfx_v9_0_rlc_stop,
+       .reset = gfx_v9_0_rlc_reset,
+       .start = gfx_v9_0_rlc_start
 };
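Annotation: with the table fully populated, chip-independent code can sequence the RLC without knowing the ASIC. A hypothetical caller, illustrative only and not part of this patch:

	static int example_rlc_restart(struct amdgpu_device *adev)
	{
		const struct amdgpu_rlc_funcs *funcs = adev->gfx.rlc.funcs;

		funcs->stop(adev);
		funcs->reset(adev);

		/* same indirection gfx_v9_0_hw_init()/hw_fini() now use */
		return funcs->resume(adev);
	}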
 
 static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4072,9 +3951,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                      struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4103,20 +3984,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                          struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+                                         struct amdgpu_job *job,
+                                         struct amdgpu_ib *ib,
+                                         bool ctx_switch)
 {
-        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
-        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
-        amdgpu_ring_write(ring,
+       amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-                                (2 << 0) |
+                               (2 << 0) |
 #endif
-                                lower_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, control);
+                               lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, control);
 }
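
Annotation: both emit_ib callbacks now derive the VMID from the job. AMDGPU_JOB_GET_VMID presumably guards the job-less case, since direct submissions pass a NULL job; an assumed shape of the macro:

	/* assumed definition, amdgpu_job.h */
	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)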
 
 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -4695,12 +4578,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+                          struct amdgpu_iv_entry *entry)
+{
+       u8 me_id, pipe_id, queue_id;
+       struct amdgpu_ring *ring;
+       int i;
+
+       me_id = (entry->ring_id & 0x0c) >> 2;
+       pipe_id = (entry->ring_id & 0x03) >> 0;
+       queue_id = (entry->ring_id & 0x70) >> 4;
+
+       switch (me_id) {
+       case 0:
+               drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+               break;
+       case 1:
+       case 2:
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       ring = &adev->gfx.compute_ring[i];
+                       if (ring->me == me_id && ring->pipe == pipe_id &&
+                           ring->queue == queue_id)
+                               drm_sched_fault(&ring->sched);
+               }
+               break;
+       }
+}
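+
Annotation: rather than scheduling the device-wide reset work, the fault handler now pokes only the scheduler of the ring that raised the interrupt. drm_sched_fault() (drm/scheduler) essentially fast-forwards that ring's timeout handling; roughly:

	void drm_sched_fault(struct drm_gpu_scheduler *sched)
	{
		/* fire the scheduler's TDR work immediately instead of
		 * waiting for the job timeout to expire */
		mod_delayed_work(system_wq, &sched->work_tdr, 0);
	}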
+
 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal register access in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v9_0_fault(adev, entry);
        return 0;
 }
 
@@ -4709,7 +4619,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_iv_entry *entry)
 {
        DRM_ERROR("Illegal instruction in command stream\n");
-       schedule_work(&adev->reset_work);
+       gfx_v9_0_fault(adev, entry);
        return 0;
 }
 
@@ -4836,10 +4746,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
                2 + /* gfx_v9_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
-       .emit_ib = gfx_v9_0_ring_emit_ib_compute,
        .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
        .test_ring = gfx_v9_0_ring_test_ring,
-       .test_ib = gfx_v9_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v9_0_ring_emit_rreg,
index ceb7847b504f70fe73435e5b81b4ee4da5588421..f5edddf3b29d5310ce4e6474d1a04be5e248adbb 100644 (file)
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
        return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
 }
 
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base)
 {
-       uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+       /* two-register stride between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+       int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+                       - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-                    lower_32_bits(value));
+       WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+                               offset * vmid, lower_32_bits(page_table_base));
 
-       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-                    upper_32_bits(value));
+       WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+                               offset * vmid, upper_32_bits(page_table_base));
 }
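
Annotation: parameterizing on vmid and page_table_base is what lets callers outside GART init (for example KFD or reset paths) retarget an individual VMID. Illustrative call, equivalent to what the next hunk does for VMID 0:

	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, amdgpu_gmc_pd_addr(adev->gart.bo));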
 
 static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-       gfxhub_v1_0_init_gart_pt_regs(adev);
+       uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+       gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
@@ -72,7 +77,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                    min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+                    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
@@ -82,11 +87,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max((adev->gmc.vram_end >> 18) + 0x1,
+                            max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+                            max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
index 206e29cad7533579f460f3964fc5c07326d50bf1..92d3a70cd9b15ca9b6a956e14d194e148913dc7e 100644 (file)
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
 u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base);
 
 #endif
index e1c2b4e9c7b23a10ac3b1b2b5375d2bf84eae9c2..2821d1d846e4aeb9cc40453c3e7a2a8bbbcaa7ae 100644 (file)
@@ -358,7 +358,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+                               uint32_t vmid, uint32_t flush_type)
 {
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
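
Annotation: the new flush_type argument is threaded through every GMC generation. Pre-Vega hubs have no notion of flush types, so they accept the parameter and ignore it, and every caller in this diff passes 0 (legacy flush). Assuming the usual gmc_funcs indirection, the dispatch looks like:

	/* illustrative: request a legacy (type 0) flush through the vtable */
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, 0);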
@@ -580,7 +581,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v6_0_set_fault_enable_default(adev, true);
 
-       gmc_v6_0_flush_gpu_tlb(adev, 0);
+       gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
index 910c4ce19cb3b329e49217e9901c1da5840a829d..761dcfb2fec029e483cb7a90ae1fa004cbfe88eb 100644 (file)
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+                               uint32_t vmid, uint32_t flush_type)
 {
        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
                WREG32(mmCHUB_CONTROL, tmp);
        }
 
-       gmc_v7_0_flush_gpu_tlb(adev, 0);
+       gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
index 1d3265c97b704b5a403cca7721818ac91dad6c4c..531aaf37759205cf0849f1ada7c13257ddf31599 100644 (file)
@@ -611,7 +611,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  * Flush the TLB for the requested page table (CIK).
  */
 static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                                       uint32_t vmid)
+                               uint32_t vmid, uint32_t flush_type)
 {
        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -920,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v8_0_set_fault_enable_default(adev, true);
 
-       gmc_v8_0_flush_gpu_tlb(adev, 0);
+       gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
index f35d7a554ad539af16c6dfc4864efcabcbd10980..811231e4ec53a68bb322a99b86859e8417cdc9ca 100644 (file)
@@ -293,14 +293,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+                                       uint32_t flush_type)
 {
        u32 req = 0;
 
-       /* invalidate using legacy mode on vmid*/
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
-       req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+       req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
        return req;
 }
 
-static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-                                                 uint32_t reg0, uint32_t reg1,
-                                                 uint32_t ref, uint32_t mask)
-{
-       signed long r, cnt = 0;
-       unsigned long flags;
-       uint32_t seq;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *ring = &kiq->ring;
-
-       spin_lock_irqsave(&kiq->ring_lock, flags);
-
-       amdgpu_ring_alloc(ring, 32);
-       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-                                           ref, mask);
-       amdgpu_fence_emit_polling(ring, &seq);
-       amdgpu_ring_commit(ring);
-       spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-       /* don't wait anymore for IRQ context */
-       if (r < 1 && in_interrupt())
-               goto failed_kiq;
-
-       might_sleep();
-
-       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-       }
-
-       if (cnt > MAX_KIQ_REG_TRY)
-               goto failed_kiq;
-
-       return 0;
-
-failed_kiq:
-       pr_err("failed to invalidate tlb with kiq\n");
-       return r;
-}
-
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +320,47 @@ failed_kiq:
  */
 
 /**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - flush the TLB with a given flush type
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
+ * @flush_type: type of TLB flush to perform
  *
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using the given flush type.
  */
 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                                       uint32_t vmid)
+                               uint32_t vmid, uint32_t flush_type)
 {
-       /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned i, j;
-       int r;
 
        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmhub *hub = &adev->vmhub[i];
-               u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
-
-               if (adev->gfx.kiq.ring.ready &&
-                   (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-                   !adev->in_gpu_reset) {
-                       r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
-                               hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
-                       if (!r)
-                               continue;
-               }
+               u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-               spin_lock(&adev->gmc.invalidate_lock);
+               if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset &&
+                   adev->gfx.kiq.ring.sched.ready &&
+                   (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+                       uint32_t req = hub->vm_inv_eng0_req + eng;
+                       uint32_t ack = hub->vm_inv_eng0_ack + eng;
 
-               WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-
-               /* Busy wait for ACK.*/
-               for (j = 0; j < 100; j++) {
-                       tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-                       tmp &= 1 << vmid;
-                       if (tmp)
-                               break;
-                       cpu_relax();
-               }
-               if (j < 100) {
-                       spin_unlock(&adev->gmc.invalidate_lock);
+                       amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+                                                          1 << vmid);
                        continue;
                }
 
-               /* Wait for ACK with a delay.*/
+               spin_lock(&adev->gmc.invalidate_lock);
+               WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
                for (j = 0; j < adev->usec_timeout; j++) {
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-                       tmp &= 1 << vmid;
-                       if (tmp)
+                       if (tmp & (1 << vmid))
                                break;
                        udelay(1);
                }
-               if (j < adev->usec_timeout) {
-                       spin_unlock(&adev->gmc.invalidate_lock);
-                       continue;
-               }
                spin_unlock(&adev->gmc.invalidate_lock);
+               if (j < adev->usec_timeout)
+                       continue;
+
                DRM_ERROR("Timeout waiting for VM flush ACK!\n");
        }
 }
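
Annotation: the open-coded KIQ write-and-wait deleted above moves behind amdgpu_virt_kiq_reg_write_reg_wait(). Judging by the removed body, the helper would keep the same fence-polling scheme, roughly:

	void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
	{
		struct amdgpu_kiq *kiq = &adev->gfx.kiq;
		struct amdgpu_ring *ring = &kiq->ring;
		signed long r, cnt = 0;
		unsigned long flags;
		uint32_t seq;

		spin_lock_irqsave(&kiq->ring_lock, flags);
		amdgpu_ring_alloc(ring, 32);
		amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, ref, mask);
		amdgpu_fence_emit_polling(ring, &seq);
		amdgpu_ring_commit(ring);
		spin_unlock_irqrestore(&kiq->ring_lock, flags);

		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
		while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
			msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
			r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
		}

		if (cnt > MAX_KIQ_REG_TRY)
			pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
	}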
@@ -429,7 +370,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+       uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;
 
        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -739,9 +680,8 @@ static int gmc_v9_0_late_init(void *handle)
                unsigned vmhub = ring->funcs->vmhub;
 
                ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-               dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
-                        ring->idx, ring->name, ring->vm_inv_eng,
-                        ring->funcs->vmhub);
+               dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+                        ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
        }
 
        /* Engine 16 is used for KFD and 17 for GART flushes */
@@ -1122,7 +1062,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
        gfxhub_v1_0_set_fault_enable_default(adev, value);
        mmhub_v1_0_set_fault_enable_default(adev, value);
-       gmc_v9_0_flush_gpu_tlb(adev, 0);
+       gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
index d0e478f434434b633be26692138042e896459714..0c9a2c03504e61be55109ba34c95266b8f9a422f 100644 (file)
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
            pi->caps_db_ramping ||
            pi->caps_td_ramping ||
            pi->caps_tcp_ramping) {
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                if (enable) {
                        ret = kv_program_pt_config_registers(adev, didt_config_kv);
                        if (ret) {
-                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                               amdgpu_gfx_rlc_exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                kv_do_enable_didt(adev, enable);
 
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
index fd23ba1226a57d9d3f1189db15f07daaab7e199d..d0d966d6080a6dda87d57d2d8ee1ed2b58a1444a 100644 (file)
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
        return base;
 }
 
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base)
 {
-       uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+       /* two-register stride between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+       int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+                       - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-                    lower_32_bits(value));
+       WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+                       offset * vmid, lower_32_bits(page_table_base));
 
-       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-                    upper_32_bits(value));
+       WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+                       offset * vmid, upper_32_bits(page_table_base));
 }
 
 static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-       mmhub_v1_0_init_gart_pt_regs(adev);
+       uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+       mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
@@ -90,7 +95,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                    min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+                    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
@@ -100,11 +105,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max((adev->gmc.vram_end >> 18) + 0x1,
+                            max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+                            max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
index bef3d0c0c117979928302c098dff95520f323e1b..0de0fdf98c00d9c6144536620afec44dc5e66228 100644 (file)
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
 void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                 bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base);
 
 #endif
index 3f3fac2d50cdf19e002a5ad7773d68003ab5aa57..e5dd052d9e06a5af4ea8d76b99944f42617b3f63 100644 (file)
@@ -34,6 +34,7 @@
 #include "nbio/nbio_7_4_offset.h"
 
 MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS          0x3010024
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        const char *chip_name;
        char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *hdr;
+       const struct psp_firmware_header_v1_0 *sos_hdr;
+       const struct ta_firmware_header_v1_0 *ta_hdr;
 
        DRM_DEBUG("\n");
 
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        if (err)
                goto out;
 
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-       adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
-                                       le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+       sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+       adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+       adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+       adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+       adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+                                       le32_to_cpu(sos_hdr->sos_size_bytes);
+       adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+                               le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
        adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                               le32_to_cpu(hdr->sos_offset_bytes);
+                               le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+       err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->psp.ta_fw);
+       if (err)
+               goto out;
+
+       ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+       adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+       adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+       adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+               le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
        return 0;
 out:
        if (err) {
@@ -167,7 +185,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
        /* Copy PSP System Driver binary to memory */
        memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-       /* Provide the sys driver to bootrom */
+       /* Provide the sys driver to bootloader */
        WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
               (uint32_t)(psp->fw_pri_mc_addr >> 20));
        psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +226,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
        /* Copy Secure OS binary to PSP memory */
        memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-       /* Provide the PSP secure OS to bootrom */
+       /* Provide the PSP secure OS to bootloader */
        WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
               (uint32_t)(psp->fw_pri_mc_addr >> 20));
        psp_gfxdrv_command_reg = 2 << 16;
@@ -552,24 +570,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
 static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
        int number_devices, struct psp_xgmi_topology_info *topology)
 {
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+       int i;
+       int ret;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       /* Fill in the shared memory with topology information as input */
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to get the topology information */
+       ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+       if (ret)
+               return ret;
+
+       /* Read the output topology information from the shared memory */
+       topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+       topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+       for (i = 0; i < topology->num_nodes; i++) {
+               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+       }
+
        return 0;
 }
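
Annotation: the XGMI entry points in this file share one calling convention: zero the TA shared buffer, fill the input message, set cmd_id, invoke the TA, then read the output message. Condensed into a hypothetical helper (names are ours, not from the patch):

	static struct ta_xgmi_shared_memory *xgmi_ta_prepare(struct psp_context *psp,
							     uint32_t cmd_id)
	{
		struct ta_xgmi_shared_memory *xgmi_cmd =
			(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;

		memset(xgmi_cmd, 0, sizeof(*xgmi_cmd));
		xgmi_cmd->cmd_id = cmd_id;

		/* caller fills xgmi_cmd->xgmi_in_message, calls psp_xgmi_invoke(),
		 * then reads xgmi_cmd->xgmi_out_message */
		return xgmi_cmd;
	}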
 
 static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
        int number_devices, struct psp_xgmi_topology_info *topology)
 {
-       return 0;
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       int i;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to set topology information */
+       return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
 }
 
 static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
 {
-       u64 hive_id = 0;
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+       /* Invoke xgmi ta to get hive id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return 0;
+       else
+               return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+}
+
+static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
-       /* Remove me when we can get correct hive_id through PSP */
-       if (psp->adev->gmc.xgmi.num_physical_nodes)
-               hive_id = 0x123456789abcdef;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
 
-       return hive_id;
+       /* Invoke xgmi ta to get the node id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return 0;
+       else
+               return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
 }
 
 static const struct psp_funcs psp_v11_0_funcs = {
@@ -587,6 +691,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
        .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
        .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
        .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+       .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
index e1ebf770c30357323ca5d65f89cb29d3babd04a2..9cea0bbe452542b8939306960a29257696bb4567 100644 (file)
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
        /* Copy PSP System Driver binary to memory */
        memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-       /* Provide the sys driver to bootrom */
+       /* Provide the sys driver to bootloader */
        WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
               (uint32_t)(psp->fw_pri_mc_addr >> 20));
        psp_gfxdrv_command_reg = 1 << 16;
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
        /* Copy Secure OS binary to PSP memory */
        memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-       /* Provide the PSP secure OS to bootrom */
+       /* Provide the PSP secure OS to bootloader */
        WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
               (uint32_t)(psp->fw_pri_mc_addr >> 20));
        psp_gfxdrv_command_reg = 2 << 16;
index 2d4770e173dd373f6ece0bc6c65f9a6b627fead5..9f3cb2aec7c2842beb64864188de3e9fe47ebc7a 100644 (file)
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on an 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        sdma_v2_4_enable(adev, true);
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
        r = amdgpu_ring_alloc(ring, 5);
-       if (r) {
-               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_device_wb_free(adev, index);
-               return r;
-       }
+       if (r)
+               goto error_free_wb;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
-       amdgpu_device_wb_free(adev, index);
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
+error_free_wb:
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
 
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err0;
-       }
 
        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+       else
                r = -EINVAL;
-       }
 
 err1:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;
 
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
 {
+       u8 instance_id, queue_id;
+
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
-       schedule_work(&adev->reset_work);
+       instance_id = (entry->ring_id & 0x3) >> 0;
+       queue_id = (entry->ring_id & 0xc) >> 2;
+
+       if (instance_id <= 1 && queue_id == 0)
+               drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
        return 0;
 }
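
Annotation: the ring_id decode above reflects our reading of the IV packing for VI-class SDMA; the patch itself does not document it:

	/* entry->ring_id, as masked above:
	 *   bits [1:0] instance_id -- SDMA0 or SDMA1
	 *   bits [3:2] queue_id    -- 0 is the gfx (kernel) queue
	 * Only those rings are driven by the GPU scheduler, hence the
	 * guard before drm_sched_fault(). */
	u8 instance_id = (entry->ring_id & 0x3) >> 0;
	u8 queue_id = (entry->ring_id & 0xc) >> 2;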
 
index 6fb3edaba0ec065fdd57f5af7866e93006bf39b2..b6a25f92d566bc292805e4e019e7928b76bbd2f1 100644 (file)
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on an 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        /* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
        r = amdgpu_ring_alloc(ring, 5);
-       if (r) {
-               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_device_wb_free(adev, index);
-               return r;
-       }
+       if (r)
+               goto error_free_wb;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
-       amdgpu_device_wb_free(adev, index);
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
+error_free_wb:
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
 
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err0;
-       }
 
        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+       else
                r = -EINVAL;
-       }
 err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;
 
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
 {
+       u8 instance_id, queue_id;
+
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
-       schedule_work(&adev->reset_work);
+       instance_id = (entry->ring_id & 0x3) >> 0;
+       queue_id = (entry->ring_id & 0xc) >> 2;
+
+       if (instance_id <= 1 && queue_id == 0)
+               drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
        return 0;
 }
 
index 7a8c9172d30a946fd91d147f8c73267a51b1fb08..f4490cdd98046bd28028588cf91b919a3b54d308 100644 (file)
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
 #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
 
+#define WREG32_SDMA(instance, offset, value) \
+       WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+       RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
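Annotation: both macros expand against a local "adev" in the calling function, so they are only usable where one is in scope. Typical use, as in the hunks below:

	u32 rb_cntl;

	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);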
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
                wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
-               u32 lowbit, highbit;
-
-               lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
-               highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
-               DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
-                               ring->me, highbit, lowbit);
-               wptr = highbit;
+               wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
                wptr = wptr << 32;
-               wptr |= lowbit;
+               wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+               DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+                               ring->me, wptr);
        }
 
        return wptr >> 2;
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
                                lower_32_bits(ring->wptr << 2),
                                ring->me,
                                upper_32_bits(ring->wptr << 2));
-               WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
-               WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+               WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+                           lower_32_bits(ring->wptr << 2));
+               WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+                           upper_32_bits(ring->wptr << 2));
+       }
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u64 wptr;
+
+       if (ring->use_doorbell) {
+               /* XXX check if swapping is necessary on BE */
+               wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+       } else {
+               wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+               wptr = wptr << 32;
+               wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+       }
+
+       return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring->use_doorbell) {
+               u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+               /* XXX check if swapping is necessary on BE */
+               WRITE_ONCE(*wb, (ring->wptr << 2));
+               WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+       } else {
+               uint64_t wptr = ring->wptr << 2;
+
+               WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+                           lower_32_bits(wptr));
+               WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+                           upper_32_bits(wptr));
        }
 }
 
 static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VEGA10).
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-                                       struct amdgpu_ib *ib,
-                                       unsigned vmid, bool ctx_switch)
+                                  struct amdgpu_job *job,
+                                  struct amdgpu_ib *ib,
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on an 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+               rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-               ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+               WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+               ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+               WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
 
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -592,6 +648,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
        /* XXX todo */
 }
 
+/**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+       u32 rb_cntl, ib_cntl;
+       int i;
+
+       if ((adev->mman.buffer_funcs_ring == sdma0) ||
+           (adev->mman.buffer_funcs_ring == sdma1))
+               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
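+               /* clear the RB and IB enable bits to quiesce the page queue */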
+               rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+                                       RB_ENABLE, 0);
+               WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+               ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+                                       IB_ENABLE, 0);
+               WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+       }
+
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
+}
+
 /**
  * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch
  *
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
        }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+               f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
                if (enable && amdgpu_sdma_phase_quantum) {
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
-                              phase_quantum);
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
-                              phase_quantum);
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
-                              phase_quantum);
+                       WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+                       WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+                       WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
                }
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+               WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
        }
 
 }
@@ -662,156 +748,217 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
        if (enable == false) {
                sdma_v4_0_gfx_stop(adev);
                sdma_v4_0_rlc_stop(adev);
+               if (adev->sdma.has_page_queue)
+                       sdma_v4_0_page_stop(adev);
        }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+               f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+               WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
        }
 }
 
+/**
+ * sdma_v4_0_rb_cntl - compute the rb_cntl value for a ring
+ * @ring: amdgpu ring pointer
+ * @rb_cntl: current rb_cntl register value; returned with the ring size
+ * and endian-swap fields programmed
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+       /* Set ring buffer size in dwords */
+       uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+                               RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+       return rb_cntl;
+}
+
 /**
  * sdma_v4_0_gfx_resume - setup and start the async dma engines
  *
  * @adev: amdgpu_device pointer
+ * @i: instance to resume
  *
  * Set up the gfx DMA ring buffer for one instance and enable it (VEGA10).
  */
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 {
-       struct amdgpu_ring *ring;
+       struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
        u32 rb_cntl, ib_cntl, wptr_poll_cntl;
-       u32 rb_bufsz;
        u32 wb_offset;
        u32 doorbell;
        u32 doorbell_offset;
-       u32 temp;
        u64 wptr_gpu_addr;
-       int i, r;
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               ring = &adev->sdma.instance[i].ring;
-               wb_offset = (ring->rptr_offs * 4);
 
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+       wb_offset = (ring->rptr_offs * 4);
 
-               /* Set ring buffer size in dwords */
-               rb_bufsz = order_base_2(ring->ring_size / 4);
-               rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
-                                       RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+       rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+       rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 
-               /* Initialize the ring buffer's read and write pointers */
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
 
-               /* set the wb address whether it's enabled or not */
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
-                      upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
-                      lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+       /* set the wb address whether it's enabled or not */
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+              upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+              lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+                               RPTR_WRITEBACK_ENABLE, 1);
 
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
 
-               ring->wptr = 0;
+       ring->wptr = 0;
 
-               /* before programing wptr to a less value, need set minor_ptr_update first */
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+       /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+       WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
 
-               if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
-               }
+       doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+       doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
 
-               doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
-               doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
-               if (ring->use_doorbell) {
-                       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
-                       doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+                                ring->use_doorbell);
+       doorbell_offset = REG_SET_FIELD(doorbell_offset,
+                                       SDMA0_GFX_DOORBELL_OFFSET,
                                        OFFSET, ring->doorbell_index);
-               } else {
-                       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
-               }
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-               adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
-                                                     ring->doorbell_index);
+       WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+       WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+       adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+                                             ring->doorbell_index);
+
+       sdma_v4_0_ring_set_wptr(ring);
+
+       /* set minor_ptr_update to 0 after wptr programed */
+       WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
+
+       /* setup the wptr shadow polling */
+       wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+                   lower_32_bits(wptr_gpu_addr));
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+                   upper_32_bits(wptr_gpu_addr));
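+       /* the engine polls the wptr shadow only when running as an SR-IOV VF */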
+       wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+       wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+                                      SDMA0_GFX_RB_WPTR_POLL_CNTL,
+                                      F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+       /* enable DMA RB */
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+       WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+
+       ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+       /* enable DMA IBs */
+       WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 
-               if (amdgpu_sriov_vf(adev))
-                       sdma_v4_0_ring_set_wptr(ring);
+       ring->sched.ready = true;
+}
 
-               /* set minor_ptr_update to 0 after wptr programed */
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+/**
+ * sdma_v4_0_page_resume - setup and start the page async dma engine
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffer for one instance and enable it (VEGA10).
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+       struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+       u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+       u32 wb_offset;
+       u32 doorbell;
+       u32 doorbell_offset;
+       u64 wptr_gpu_addr;
 
-               /* set utc l1 enable flag always to 1 */
-               temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+       wb_offset = (ring->rptr_offs * 4);
 
-               if (!amdgpu_sriov_vf(adev)) {
-                       /* unhalt engine */
-                       temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-                       temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
-               }
+       rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+       rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
 
-               /* setup the wptr shadow polling */
-               wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
-                      lower_32_bits(wptr_gpu_addr));
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
-                      upper_32_bits(wptr_gpu_addr));
-               wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
-               if (amdgpu_sriov_vf(adev))
-                       wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
-               else
-                       wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
 
-               /* enable DMA RB */
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+       /* set the wb address whether it's enabled or not */
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+              upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+              lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 
-               ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
-#ifdef __BIG_ENDIAN
-               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
-#endif
-               /* enable DMA IBs */
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+                               RPTR_WRITEBACK_ENABLE, 1);
 
-               ring->ready = true;
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
 
-               if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
-                       sdma_v4_0_ctx_switch_enable(adev, true);
-                       sdma_v4_0_enable(adev, true);
-               }
+       ring->wptr = 0;
 
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
-                       return r;
-               }
+       /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+       WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
 
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
+       doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+       doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
 
-       }
+       doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+                                ring->use_doorbell);
+       doorbell_offset = REG_SET_FIELD(doorbell_offset,
+                                       SDMA0_PAGE_DOORBELL_OFFSET,
+                                       OFFSET, ring->doorbell_index);
+       WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+       WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+       /* TODO: enable doorbell support */
+       /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+                                             ring->doorbell_index);*/
+
+       sdma_v4_0_ring_set_wptr(ring);
+
+       /* set minor_ptr_update to 0 after wptr programed */
+       WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+       /* setup the wptr shadow polling */
+       wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+                   lower_32_bits(wptr_gpu_addr));
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+                   upper_32_bits(wptr_gpu_addr));
+       wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+       wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+                                      SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+                                      F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+       /* enable DMA RB */
+       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+       WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+       ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+       /* enable DMA IBs */
+       WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
 
-       return 0;
+       ring->sched.ready = true;
 }
 
 static void
@@ -922,12 +1069,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
                        (adev->sdma.instance[i].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+               WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
 
                for (j = 0; j < fw_size; j++)
-                       WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+                       WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+                                   le32_to_cpup(fw_data++));
 
-               WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+               WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+                           adev->sdma.instance[i].fw_version);
        }
 
        return 0;
@@ -943,33 +1092,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v4_0_start(struct amdgpu_device *adev)
 {
-       int r = 0;
+       struct amdgpu_ring *ring;
+       int i, r;
 
        if (amdgpu_sriov_vf(adev)) {
                sdma_v4_0_ctx_switch_enable(adev, false);
                sdma_v4_0_enable(adev, false);
+       } else {
 
-               /* set RB registers */
-               r = sdma_v4_0_gfx_resume(adev);
-               return r;
+               if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+                       r = sdma_v4_0_load_microcode(adev);
+                       if (r)
+                               return r;
+               }
+
+               /* unhalt the MEs */
+               sdma_v4_0_enable(adev, true);
+               /* enable sdma ring preemption */
+               sdma_v4_0_ctx_switch_enable(adev, true);
+       }
+
+       /* start the gfx and page rings */
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               uint32_t temp;
+
+               WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+               sdma_v4_0_gfx_resume(adev, i);
+               if (adev->sdma.has_page_queue)
+                       sdma_v4_0_page_resume(adev, i);
+
+               /* set utc l1 enable flag always to 1 */
+               temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+               temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+               WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+               if (!amdgpu_sriov_vf(adev)) {
+                       /* unhalt engine */
+                       temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+                       temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+                       WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+               }
        }
 
-       if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-               r = sdma_v4_0_load_microcode(adev);
+       if (amdgpu_sriov_vf(adev)) {
+               sdma_v4_0_ctx_switch_enable(adev, true);
+               sdma_v4_0_enable(adev, true);
+       } else {
+               r = sdma_v4_0_rlc_resume(adev);
                if (r)
                        return r;
        }
 
-       /* unhalt the MEs */
-       sdma_v4_0_enable(adev, true);
-       /* enable sdma ring preemption */
-       sdma_v4_0_ctx_switch_enable(adev, true);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
 
-       /* start the gfx rings and rlc compute queues */
-       r = sdma_v4_0_gfx_resume(adev);
-       if (r)
-               return r;
-       r = sdma_v4_0_rlc_resume(adev);
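+               /* smoke-test the ring; on failure the helper marks it not ready */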
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
+                       return r;
+
+               if (adev->sdma.has_page_queue) {
+                       struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+                       r = amdgpu_ring_test_helper(page);
+                       if (r)
+                               return r;
+
+                       if (adev->mman.buffer_funcs_ring == page)
+                               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+               }
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
+       }
 
        return r;
 }
@@ -993,21 +1187,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
        r = amdgpu_ring_alloc(ring, 5);
-       if (r) {
-               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_device_wb_free(adev, index);
-               return r;
-       }
+       if (r)
+               goto error_free_wb;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1213,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
-       amdgpu_device_wb_free(adev, index);
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
+error_free_wb:
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
 
@@ -1055,20 +1240,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err0;
-       }
 
        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1268,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+       else
                r = -EINVAL;
-       }
+
 err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
@@ -1206,7 +1383,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;
 
@@ -1276,10 +1453,18 @@ static int sdma_v4_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_RAVEN)
+       if (adev->asic_type == CHIP_RAVEN) {
                adev->sdma.num_instances = 1;
-       else
+               adev->sdma.has_page_queue = false;
+       } else {
                adev->sdma.num_instances = 2;
+               /* TODO: Page queue breaks driver reload under SRIOV */
+               if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_vf(adev))
+                       adev->sdma.has_page_queue = false;
+               else if (adev->asic_type != CHIP_VEGA20 &&
+                               adev->asic_type != CHIP_VEGA12)
+                       adev->sdma.has_page_queue = true;
+       }
 
        sdma_v4_0_set_ring_funcs(adev);
        sdma_v4_0_set_buffer_funcs(adev);
@@ -1340,6 +1525,21 @@ static int sdma_v4_0_sw_init(void *handle)
                                     AMDGPU_SDMA_IRQ_TRAP1);
                if (r)
                        return r;
+
+               if (adev->sdma.has_page_queue) {
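+                       /* the page queue shares the instance's trap interrupt with the gfx ring */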
+                       ring = &adev->sdma.instance[i].page;
+                       ring->ring_obj = NULL;
+                       ring->use_doorbell = false;
+
+                       sprintf(ring->name, "page%d", i);
+                       r = amdgpu_ring_init(adev, ring, 1024,
+                                            &adev->sdma.trap_irq,
+                                            (i == 0) ?
+                                            AMDGPU_SDMA_IRQ_TRAP0 :
+                                            AMDGPU_SDMA_IRQ_TRAP1);
+                       if (r)
+                               return r;
+               }
        }
 
        return r;
@@ -1350,8 +1550,11 @@ static int sdma_v4_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+               if (adev->sdma.has_page_queue)
+                       amdgpu_ring_fini(&adev->sdma.instance[i].page);
+       }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1617,7 @@ static bool sdma_v4_0_is_idle(void *handle)
        u32 i;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+               u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
 
                if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                        return false;
@@ -1430,8 +1633,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
-               sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+               sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+               sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
 
                if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
                        return 0;
@@ -1452,16 +1655,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
 {
+       unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
        u32 sdma_cntl;
 
-       u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
-               sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-               sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
-       sdma_cntl = RREG32(reg_offset);
+       sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
                       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-       WREG32(reg_offset, sdma_cntl);
+       WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
 
        return 0;
 }
@@ -1470,39 +1670,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
 {
+       uint32_t instance;
+
        DRM_DEBUG("IH: SDMA trap\n");
        switch (entry->client_id) {
        case SOC15_IH_CLIENTID_SDMA0:
-               switch (entry->ring_id) {
-               case 0:
-                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
-                       break;
-               case 1:
-                       /* XXX compute */
-                       break;
-               case 2:
-                       /* XXX compute */
-                       break;
-               case 3:
-                       /* XXX page queue*/
-                       break;
-               }
+               instance = 0;
                break;
        case SOC15_IH_CLIENTID_SDMA1:
-               switch (entry->ring_id) {
-               case 0:
-                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
-                       break;
-               case 1:
-                       /* XXX compute */
-                       break;
-               case 2:
-                       /* XXX compute */
-                       break;
-               case 3:
-                       /* XXX page queue*/
-                       break;
-               }
+               instance = 1;
+               break;
+       default:
+               return 0;
+       }
+
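+       /* ring_id selects the queue within the instance: 0 gfx, 1/2 compute, 3 page */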
+       switch (entry->ring_id) {
+       case 0:
+               amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+               break;
+       case 1:
+               /* XXX compute */
+               break;
+       case 2:
+               /* XXX compute */
+               break;
+       case 3:
+               amdgpu_fence_process(&adev->sdma.instance[instance].page);
                break;
        }
        return 0;
@@ -1512,12 +1705,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
 {
+       int instance;
+
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
-       schedule_work(&adev->reset_work);
+
+       switch (entry->client_id) {
+       case SOC15_IH_CLIENTID_SDMA0:
+               instance = 0;
+               break;
+       case SOC15_IH_CLIENTID_SDMA1:
+               instance = 1;
+               break;
+       default:
+               return 0;
+       }
+
+       switch (entry->ring_id) {
+       case 0:
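+               /* hand the fault to the GPU scheduler, which times out the job and drives recovery */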
+               drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+               break;
+       }
        return 0;
 }
 
-
 static void sdma_v4_0_update_medium_grain_clock_gating(
                struct amdgpu_device *adev,
                bool enable)
@@ -1730,6 +1940,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+       .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+       .support_64bit_ptrs = true,
+       .vmhub = AMDGPU_MMHUB,
+       .get_rptr = sdma_v4_0_ring_get_rptr,
+       .get_wptr = sdma_v4_0_page_ring_get_wptr,
+       .set_wptr = sdma_v4_0_page_ring_set_wptr,
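+       /* the remaining callbacks are shared with the gfx ring; only the wptr handling differs */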
+       .emit_frame_size =
+               6 + /* sdma_v4_0_ring_emit_hdp_flush */
+               3 + /* hdp invalidate */
+               6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+               /* sdma_v4_0_ring_emit_vm_flush */
+               SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+               10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+       .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+       .emit_ib = sdma_v4_0_ring_emit_ib,
+       .emit_fence = sdma_v4_0_ring_emit_fence,
+       .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+       .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+       .test_ring = sdma_v4_0_ring_test_ring,
+       .test_ib = sdma_v4_0_ring_test_ib,
+       .insert_nop = sdma_v4_0_ring_insert_nop,
+       .pad_ib = sdma_v4_0_ring_pad_ib,
+       .emit_wreg = sdma_v4_0_ring_emit_wreg,
+       .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
@@ -1737,6 +1979,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
                adev->sdma.instance[i].ring.me = i;
+               if (adev->sdma.has_page_queue) {
+                       adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+                       adev->sdma.instance[i].page.me = i;
+               }
        }
 }
 
@@ -1818,7 +2064,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
        adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
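+       /* prefer the page queue for TTM buffer moves, keeping them off the gfx queue */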
+       if (adev->sdma.has_page_queue)
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+       else
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2085,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 
        adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               sched = &adev->sdma.instance[i].ring.sched;
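+               /* VM page table updates likewise use the page queue when available */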
+               if (adev->sdma.has_page_queue)
+                       sched = &adev->sdma.instance[i].page.sched;
+               else
+                       sched = &adev->sdma.instance[i].ring.sched;
                adev->vm_manager.vm_pte_rqs[i] =
                        &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
index adbaea6da0d71a28a5b2cc510614fcde9692d9b6..b6e473134e19fae3bb107fa9160676616ecd44fc 100644 (file)
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 }
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
-                               unsigned vmid, bool ctx_switch)
+                               bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-               ring->ready = false;
+               ring->sched.ready = false;
        }
 }
 
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
                WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-               ring->ready = true;
+               ring->sched.ready = true;
 
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
        u64 gpu_addr;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
        r = amdgpu_ring_alloc(ring, 4);
-       if (r) {
-               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-               amdgpu_device_wb_free(adev, index);
-               return r;
-       }
+       if (r)
+               goto error_free_wb;
 
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
-       amdgpu_device_wb_free(adev, index);
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
 
+error_free_wb:
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
 
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = amdgpu_device_wb_get(adev, &index);
-       if (r) {
-               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+       if (r)
                return r;
-       }
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+       if (r)
                goto err0;
-       }
 
        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
        ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
-       if (tmp == 0xDEADBEEF) {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       if (tmp == 0xDEADBEEF)
                r = 0;
-       } else {
-               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+       else
                r = -EINVAL;
-       }
 
 err1:
        amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
        return 0;
 }
 
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
-                                             struct amdgpu_irq_src *source,
-                                             struct amdgpu_iv_entry *entry)
-{
-       DRM_ERROR("Illegal instruction in SDMA command stream\n");
-       schedule_work(&adev->reset_work);
-       return 0;
-}
-
 static int si_dma_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
        .process = si_dma_process_trap_irq,
 };
 
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
-       .process = si_dma_process_illegal_inst_irq,
-};
-
 static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
-       adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644 (file)
index 0000000..ac2c27b
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
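+/* e.g. RSP_ID(TA_COMMAND_XGMI__GET_HIVE_ID) identifies the response to that command */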
+
+enum ta_command_xgmi {
+       TA_COMMAND_XGMI__INITIALIZE                     = 0x00,
+       TA_COMMAND_XGMI__GET_NODE_ID                    = 0x01,
+       TA_COMMAND_XGMI__GET_HIVE_ID                    = 0x02,
+       TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO          = 0x03,
+       TA_COMMAND_XGMI__SET_TOPOLOGY_INFO              = 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+       TA_XGMI__MAX_CONNECTED_NODES                    = 64
+};
+
+enum ta_xgmi_status {
+       TA_XGMI_STATUS__SUCCESS                         = 0x00,
+       TA_XGMI_STATUS__GENERIC_FAILURE                 = 0x01,
+       TA_XGMI_STATUS__NULL_POINTER                    = 0x02,
+       TA_XGMI_STATUS__INVALID_PARAMETER               = 0x03,
+       TA_XGMI_STATUS__NOT_INITIALIZED                 = 0x04,
+       TA_XGMI_STATUS__INVALID_NODE_NUM                = 0x05,
+       TA_XGMI_STATUS__INVALID_NODE_ID                 = 0x06,
+       TA_XGMI_STATUS__INVALID_TOPOLOGY                = 0x07,
+       TA_XGMI_STATUS__FAILED_ID_GEN                   = 0x08,
+       TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT            = 0x09,
+       TA_XGMI_STATUS__SET_SHARING_ERROR               = 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED      = -1,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0             = 0,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1             = 1,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2             = 2,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3             = 3,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4             = 4,
+       TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5             = 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+       uint64_t                                node_id;
+       uint8_t                                 num_hops;
+       uint8_t                                 is_sharing_enabled;
+       enum ta_xgmi_assigned_sdma_engine       sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+       uint32_t        status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+       uint64_t        node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+       uint64_t        hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+       uint32_t                        num_nodes;
+       struct ta_xgmi_node_info        nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+       uint32_t                        num_nodes;
+       struct ta_xgmi_node_info        nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+       uint32_t                        num_nodes;
+       struct ta_xgmi_node_info        nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+       struct ta_xgmi_cmd_get_topology_info_input      get_topology_info;
+       struct ta_xgmi_cmd_set_topology_info_input      set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+       struct ta_xgmi_cmd_initialize_output            initialize;
+       struct ta_xgmi_cmd_get_node_id_output           get_node_id;
+       struct ta_xgmi_cmd_get_hive_id_output           get_hive_id;
+       struct ta_xgmi_cmd_get_topology_info_output     get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+       uint32_t                        cmd_id;
+       uint32_t                        resp_id;
+       enum ta_xgmi_status             xgmi_status;
+       uint32_t                        reserved;
+       union ta_xgmi_cmd_input         xgmi_in_message;
+       union ta_xgmi_cmd_output        xgmi_out_message;
+};
+
+#endif /* _TA_XGMI_IF_H */
index 1fc17bf39fed710f77c8ff94d741af579965639d..90bbcee00f289e007f945e0ed66f6e604b148c87 100644 (file)
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
        uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
        return r;
 }
 
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
index fde6ad5ac9ab3ff8dc640a5a73cfb32dd99976e6..1c5e127031037e9b7dcfe47b52230ec12264dfca 100644 (file)
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v5_0_enable_mgcg(adev, true);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
        return r;
 }
 
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 7a5b40275e8e7b0ffa7600c99af464dfe5031a23..f184842ef2a280b183216b279a0253a42d4e00f7 100644 (file)
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        r = amdgpu_ring_alloc(ring, 16);
-       if (r) {
-               DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed\n",
-                         ring->idx);
+       if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
-       }
 
        return r;
 }
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: IB test timed out.\n");
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-       } else {
-               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       else if (r > 0)
                r = 0;
-       }
+
 error:
        dma_fence_put(fence);
        return r;
 }
+
 static int uvd_v6_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
                }
        }
 
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
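The ready/test/unready boilerplate removed throughout these hunks is now concentrated in amdgpu_ring_test_helper(), and the flag itself has moved into the scheduler as ring->sched.ready, as the hw_fini hunk above shows. A plausible sketch of the helper, assuming it mirrors the removed pattern:

int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	if (r)
		DRM_ERROR("ring %s test failed (%d)\n", ring->name, r);

	/* Centralized here, so callers no longer toggle the flag. */
	ring->sched.ready = !r;

	return r;
}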
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-                         ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-                        ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
        return r;
 }
 
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vmid);
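The dropped vmid parameter is recovered from the job via AMDGPU_JOB_GET_VMID(). Since ->emit_ib is also called with a NULL job for driver-internal submissions such as IB tests, the macro presumably falls back to VMID 0; a sketch under that assumption:

/* Assumed definition: take the job's VMID when present, VMID 0 for
 * kernel-internal IBs submitted without a job. */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)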
 
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 58b39afcfb86461d68e47496578184d2c7b74317..8a4595968d98f4b506bcfbddee121cd640c4c956 100644 (file)
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
                return 0;
 
        r = amdgpu_ring_alloc(ring, 16);
-       if (r) {
-               DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
-                         ring->me, ring->idx, r);
+       if (r)
                return r;
-       }
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-                        ring->me, ring->idx, i);
-       } else {
-               DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
-                         ring->me, ring->idx);
+       if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
-       }
 
        return r;
 }
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        long r;
 
        r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+       if (r)
                goto error;
-       }
 
        r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+       if (r)
                goto error;
-       }
 
        r = dma_fence_wait_timeout(fence, false, timeout);
-       if (r == 0) {
-               DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+       if (r == 0)
                r = -ETIMEDOUT;
-       } else if (r < 0) {
-               DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
-       } else {
-               DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+       else if (r > 0)
                r = 0;
-       }
+
 error:
        dma_fence_put(fence);
        return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
                ring = &adev->uvd.inst[j].ring;
 
                if (!amdgpu_sriov_vf(adev)) {
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
 
                        r = amdgpu_ring_alloc(ring, 10);
                        if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
 
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst[j].ring_enc[i];
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
                }
        }
 done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
                if (adev->uvd.harvest_config & (1 << i))
                        continue;
-               adev->uvd.inst[i].ring.ready = false;
+               adev->uvd.inst[i].ring.sched.ready = false;
        }
 
        return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 
        WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
-       if (r) {
-               DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
-                         ring->me, ring->idx, r);
+       if (r)
                return r;
-       }
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
                DRM_UDELAY(1);
        }
 
-       if (i < adev->usec_timeout) {
-               DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-                        ring->me, ring->idx, i);
-       } else {
-               DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
-                         ring->me, ring->idx, tmp);
-               r = -EINVAL;
-       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
        return r;
 }
 
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index ea28828360d3b3c1d181d0214134b33f8dba7704..bed78a778e3f1f546e30e18dc67f709751d87fc6 100644 (file)
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
 
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
 
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
index 6dbd39730070a30132f7841a1e7dc18a8e54a35a..3e84840859a725708d5df38640bf9b1b65201e3a 100644 (file)
@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle)
 
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
-
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +833,12 @@ out:
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                 struct amdgpu_job *job,
+                                 struct amdgpu_ib *ib,
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 1c9471890bf71b9ce94a5eecb96f4594eba01bc0..0054ba1b9a6855df99dfd1f3362bce11ed6af0ab 100644 (file)
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
        if (r)
                return r;
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
-
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
        }
 
        for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
+               adev->vce.ring[i].sched.ready = false;
 
        return 0;
 }
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
 }
 #endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib, bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index eae90922fdbe0f4356be31c4fd16eeb6846409a2..c1a03505f956c9419967a5a07fa81f47bfa8dd81 100644 (file)
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int i, r;
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               ring->sched.ready = true;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        goto done;
-               }
        }
 
        ring = &adev->vcn.ring_jpeg;
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
 done:
        if (!r)
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
                vcn_v1_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
@@ -1366,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  * Write ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1524,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1725,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
  * Write ring commands to execute the indirect buffer.
  */
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
index a99f71797aa359f83217887dd4dcf531d639d45e..a0fda6f9252a52979b5c90569d48b4212f4ea27a 100644 (file)
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
        else
                wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+       WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
 
        /* set rptr, wptr to 0 */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
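This one-liner is a genuine fix: IH_RB_WPTR_ADDR_HI evidently holds 16 address bits, so masking with 0xFF dropped bits 40-47 of the write-back address. Illustrated with a hypothetical address:

	uint64_t wptr_off = 0x0000012345678000ULL;	/* hypothetical */

	/* upper_32_bits(wptr_off) == 0x00000123 here:
	 *   & 0xFF   -> 0x0023  (bits 40-47 silently lost)
	 *   & 0xFFFF -> 0x0123  (full HI field preserved)
	 */
	uint32_t hi = upper_32_bits(wptr_off) & 0xFFFF;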
index 2d4473557b0d23210782ff72397b47bd7f9c94ef..d13fc4fcb51790859f03aefb14f4bd90067c8fd8 100644 (file)
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
                adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
                adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
                adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+               adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
        }
        return 0;
 }
index 5d2475d5392ce25cbb719c0fa519e0c0d94777fc..177d1e5329a5723482c437381253700c224aeb90 100644 (file)
@@ -23,6 +23,7 @@
 #include "kfd_priv.h"
 #include "kfd_events.h"
 #include "cik_int.h"
+#include "amdgpu_amdkfd.h"
 
 static bool cik_event_interrupt_isr(struct kfd_dev *dev,
                                        const uint32_t *ih_ring_entry,
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
                kfd_process_vm_fault(dev->dqm, pasid);
 
                memset(&info, 0, sizeof(info));
-               dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+               amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
                if (!info.page_addr && !info.status)
                        return;
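This is the first of many KFD hunks that replace indirect calls through the kfd2kgd function-pointer table with direct calls to exported amdgpu_amdkfd_* helpers. The helpers are presumably thin wrappers that cast the opaque kgd_dev handle to the amdgpu device and do the work inline; a hypothetical sketch of the shape:

/* Hypothetical wrapper shape - the body is an assumption. The kgd_dev
 * handle is an amdgpu_device in disguise, so the helper can reach
 * amdgpu state directly instead of bouncing through an ops table. */
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*info = *adev->gmc.vm_fault_info;
		mb();	/* finish the copy before clearing the flag */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}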
 
index 37ce6dd653917cb441958a3fbacef9463a611b9a..8e2a1663c4db6e41ec6d35f59dc40f289fbd83d9 100644 (file)
@@ -68,6 +68,4 @@
 
 #define GRBM_GFX_INDEX                                 0x30800
 
-#define        ATC_VMID_PASID_MAPPING_VALID                    (1U << 31)
-
 #endif
index 14d5b5fa822d4e8722cc2d6a3e6cc1cba0b1b7bd..5f4062b41adddab4cc30797766d18756ff1def79 100644 (file)
@@ -37,6 +37,7 @@
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_dbgmgr.h"
+#include "amdgpu_amdkfd.h"
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
@@ -834,8 +835,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
        dev = kfd_device_by_id(args->gpu_id);
        if (dev)
                /* Reading GPU clock counter from KGD */
-               args->gpu_clock_counter =
-                       dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+               args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
        else
                /* Node without GPU resource */
                args->gpu_clock_counter = 0;
@@ -1042,7 +1042,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
                }
                mutex_unlock(&p->mutex);
 
-               err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+               err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
                                                mem, &kern_addr, &size);
                if (err) {
                        pr_err("Failed to map event page to kernel\n");
@@ -1240,7 +1240,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
        if (dev->device_info->needs_iommu_device)
                return false;
 
-       dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+       amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
        if (mem_info.local_mem_size_private == 0 &&
                        mem_info.local_mem_size_public > 0)
                return true;
@@ -1281,7 +1281,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
-       err = dev->kfd2kgd->alloc_memory_of_gpu(
+       err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                dev->kgd, args->va_addr, args->size,
                pdd->vm, (struct kgd_mem **) &mem, &offset,
                flags);
@@ -1303,7 +1303,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        return 0;
 
 err_free:
-       dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
 err_unlock:
        mutex_unlock(&p->mutex);
        return err;
@@ -1338,7 +1338,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
-       ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+                                               (struct kgd_mem *)mem);
 
        /* If freeing the buffer failed, leave the handle in place for
         * clean-up during process tear-down.
@@ -1418,7 +1419,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                        err = PTR_ERR(peer_pdd);
                        goto get_mem_obj_from_handle_failed;
                }
-               err = peer->kfd2kgd->map_memory_to_gpu(
+               err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                        peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
                if (err) {
                        pr_err("Failed to map to gpu %d/%d\n",
@@ -1430,7 +1431,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
        mutex_unlock(&p->mutex);
 
-       err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+       err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
@@ -1525,7 +1526,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                        err = -ENODEV;
                        goto get_mem_obj_from_handle_failed;
                }
-               err = dev->kfd2kgd->unmap_memory_to_gpu(
+               err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                        peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
                if (err) {
                        pr_err("Failed to unmap from gpu %d/%d\n",
index 56412b0e7e1c73d79ff2a701b5da7a54affb258a..3783d122f28354a42e67e478faf470778edb5bfe 100644 (file)
@@ -26,6 +26,7 @@
 #include "kfd_priv.h"
 #include "kfd_topology.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
@@ -753,12 +754,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
                return -ENODATA;
        }
 
-       pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+       pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
        if (!pcrat_image)
                return -ENOMEM;
 
-       memcpy(pcrat_image, crat_table, crat_table->length);
-
        *crat_image = pcrat_image;
        *size = crat_table->length;
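kmemdup() replaces the kmalloc()+memcpy() pair with the standard allocate-and-copy idiom; it behaves roughly like this simplified sketch:

/* Simplified sketch of kmemdup() semantics. */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}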
 
@@ -1161,7 +1160,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
        cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
        cu->proximity_domain = proximity_domain;
 
-       kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+       amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
        cu->num_simd_per_cu = cu_info.simd_per_cu;
        cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
        cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -1192,7 +1191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
         * report the total FB size (public+private) as a single
         * private heap.
         */
-       kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+       amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);
 
index a9f18ea7e354377c4ed47532eaaf1257e11b695b..c004647c8cb413c80be5c83b465043cb0f2d02f6 100644 (file)
@@ -28,6 +28,7 @@
 #include "kfd_pm4_headers_vi.h"
 #include "cwsr_trap_handler.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -478,7 +479,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        /* add another 512KB for all other allocations on gart (HPD, fences) */
        size += 512 * 1024;
 
-       if (kfd->kfd2kgd->init_gtt_mem_allocation(
+       if (amdgpu_amdkfd_alloc_gtt_mem(
                        kfd->kgd, size, &kfd->gtt_mem,
                        &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
                        false)) {
@@ -552,7 +553,7 @@ kfd_topology_add_device_error:
 kfd_doorbell_error:
        kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
-       kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+       amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
                kfd->pdev->vendor, kfd->pdev->device);
@@ -569,7 +570,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
-               kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+               amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        }
 
        kfree(kfd);
@@ -681,6 +682,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
        uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
        bool is_patched = false;
+       unsigned long flags;
 
        if (!kfd->init_complete)
                return;
@@ -690,7 +692,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
                return;
        }
 
-       spin_lock(&kfd->interrupt_lock);
+       spin_lock_irqsave(&kfd->interrupt_lock, flags);
 
        if (kfd->interrupts_active
            && interrupt_is_wanted(kfd, ih_ring_entry,
@@ -699,7 +701,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
                                     is_patched ? patched_ihre : ih_ring_entry))
                queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
-       spin_unlock(&kfd->interrupt_lock);
+       spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm)
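The irqsave variant matters because interrupt delivery can race with other users of interrupt_lock: if an IRQ arrives on a CPU that already holds the lock with interrupts enabled, the handler would spin on it forever. The canonical pattern:

	unsigned long flags;

	/* Disable local interrupts while the lock is held, so an IRQ
	 * arriving on this CPU cannot spin on a lock we already own. */
	spin_lock_irqsave(&kfd->interrupt_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);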
index a3b9339671713cbbbb4ade0fb049f5a786607a2f..fb9d66ea13b761cf745fedfe61b6a0bee0dafca7 100644 (file)
@@ -33,6 +33,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
 
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2 11
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
        if (ret)
                return ret;
 
-       return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+       return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
                                qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
                                pmf->release_mem_size / sizeof(uint32_t));
 }
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 
        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
-       pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+       pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 
        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
-       pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+       pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm,
 
        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
-       pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+       pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
        dqm_lock(dqm);
        list_add(&n->list, &dqm->queues);
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm,
        retval = dqm->asic_ops.update_qpd(dqm, qpd);
 
        if (dqm->processes_count++ == 0)
-               dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+               amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
 
        dqm_unlock(dqm);
 
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm,
                        list_del(&cur->list);
                        kfree(cur);
                        if (--dqm->processes_count == 0)
-                               dqm->dev->kfd2kgd->set_compute_idle(
+                               amdgpu_amdkfd_set_compute_idle(
                                        dqm->dev->kgd, true);
                        goto out;
                }
@@ -845,15 +846,8 @@ static int
 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
                        unsigned int vmid)
 {
-       uint32_t pasid_mapping;
-
-       pasid_mapping = (pasid == 0) ? 0 :
-               (uint32_t)pasid |
-               ATC_VMID_PASID_MAPPING_VALID;
-
        return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
-                                               dqm->dev->kgd, pasid_mapping,
-                                               vmid);
+                                               dqm->dev->kgd, pasid, vmid);
 }
 
 static void init_interrupts(struct device_queue_manager *dqm)
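With ATC_VMID_PASID_MAPPING_VALID removed from the header earlier in this series, the valid-bit composition moves behind the set_pasid_vmid_mapping() implementation, so callers pass the raw PASID. The encoding presumably now lives on the amdgpu side, along these lines:

/* Assumed relocation of the encoding: bit 31 marks the mapping valid,
 * and PASID 0 clears the VMID's mapping entirely. */
uint32_t pasid_mapping = pasid ? ((uint32_t)pasid | (1U << 31)) : 0;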
@@ -1796,7 +1790,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
 {
        struct device_queue_manager *dqm = container_of(work,
                        struct device_queue_manager, hw_exception_work);
-       dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+       amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
 }
 
 #if defined(CONFIG_DEBUG_FS)
index e33019a7a883cc1d2df8410ea141257d71297adb..6910028010d637d69892ee03cdca07d96aae015d 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include "kfd_mqd_manager.h"
+#include "amdgpu_amdkfd.h"
 
 struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
                                        struct kfd_dev *dev)
@@ -58,7 +59,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
        uint32_t cu_per_sh[4] = {0};
        int i, se, cu = 0;
 
-       mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+       amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
 
        if (cu_mask_count > cu_info.cu_active_number)
                cu_mask_count = cu_info.cu_active_number;
index f381c1cb27bdc867d67308e4f0cc2176e777dc5c..9dbba609450e73a7e2c4702bc7c3d59c1274cf01 100644 (file)
@@ -30,6 +30,7 @@
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
 #include "sdma0/sdma0_4_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
 
 static inline struct v9_mqd *get_mqd(void *mqd)
 {
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
                *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
                if (!*mqd_mem_obj)
                        return -ENOMEM;
-               retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+               retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                        ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                        &((*mqd_mem_obj)->gtt_mem),
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
        struct kfd_dev *kfd = mm->dev;
 
        if (mqd_mem_obj->gtt_mem) {
-               kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+               amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
                kfree(mqd_mem_obj);
        } else {
                kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
index 15fff4420e534fbd79c1cc1844244f715d348935..33b08ff00b5012f9741209e43d0c5d21e644e6ec 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/types.h>
 #include "kfd_priv.h"
+#include "amdgpu_ids.h"
 
 static unsigned int pasid_bits = 16;
 static const struct kfd2kgd_calls *kfd2kgd;
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void)
                        return false;
        }
 
-       r = kfd2kgd->alloc_pasid(pasid_bits);
+       r = amdgpu_pasid_alloc(pasid_bits);
 
        return r > 0 ? r : 0;
 }
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void)
 void kfd_pasid_free(unsigned int pasid)
 {
        if (kfd2kgd)
-               kfd2kgd->free_pasid(pasid);
+               amdgpu_pasid_free(pasid);
 }
index 53ff86d45d918d7e465309b9e476641332b241ef..dec8e64f36bdc80ef2ac851ba35d700bf8d32140 100644 (file)
@@ -507,6 +507,7 @@ struct qcm_process_device {
         * All the memory management data should be here too
         */
        uint64_t gds_context_area;
+       /* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
        uint64_t page_table_base;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
index 0039e451d9af2a7e816dafb64559595b22a3d8a4..80b36e860a0a8de2d30f4475b4802db3028b6cf2 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/mman.h>
 #include <linux/file.h>
+#include "amdgpu_amdkfd.h"
 
 struct mm_struct;
 
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
 {
        struct kfd_dev *dev = pdd->dev;
 
-       dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
-       dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
 }
 
 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
        int handle;
        int err;
 
-       err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+       err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
                                                 pdd->vm, &mem, NULL, flags);
        if (err)
                goto err_alloc_mem;
 
-       err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
        if (err)
                goto err_map_mem;
 
-       err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+       err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
        }
 
        if (kptr) {
-               err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+               err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
                                (struct kgd_mem *)mem, kptr, NULL);
                if (err) {
                        pr_debug("Map GTT BO to kernel failed\n");
@@ -165,7 +166,7 @@ sync_memory_failed:
        return err;
 
 err_map_mem:
-       kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
 err_alloc_mem:
        *kptr = NULL;
        return err;
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
                                    per_device_list) {
                        if (!peer_pdd->vm)
                                continue;
-                       peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+                       amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }
 
-               pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+               amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
 }
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                                pdd->dev->id, p->pasid);
 
                if (pdd->drm_file) {
-                       pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+                       amdgpu_amdkfd_gpuvm_release_process_vm(
+                                       pdd->dev->kgd, pdd->vm);
                        fput(pdd->drm_file);
                }
                else if (pdd->vm)
-                       pdd->dev->kfd2kgd->destroy_process_vm(
+                       amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
 
                list_del(&pdd->per_device_list);
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
        dev = pdd->dev;
 
        if (drm_file)
-               ret = dev->kfd2kgd->acquire_process_vm(
+               ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                        dev->kgd, drm_file, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
-               ret = dev->kfd2kgd->create_process_vm(
-                       dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+               ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
+                       &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
@@ -714,7 +716,7 @@ err_init_cwsr:
 err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
-               dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+               amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;
 
        return ret;
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work)
         */
 
        p->last_restore_timestamp = get_jiffies_64();
-       ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+       ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
                                                     &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
index e3843c5929edffdf2d0bb45969e7818302c50336..c73b4ff61f99b61ee61b75bd04698a53dd81c584 100644 (file)
@@ -36,6 +36,7 @@
 #include "kfd_topology.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* topology_device_list - Master list of all topology devices */
 static struct list_head topology_device_list;
@@ -1052,7 +1053,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
        if (!gpu)
                return 0;
 
-       gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+       amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
 
        local_mem_size = local_mem_info.local_mem_size_private +
                        local_mem_info.local_mem_size_public;
@@ -1118,8 +1119,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
         * for APUs - If CRAT from ACPI reports more than one bank, then
         *      all the banks will report the same mem_clk_max information
         */
-       dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
-               &local_mem_info);
+       amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
 
        list_for_each_entry(mem, &dev->mem_props, list)
                mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1240,7 +1240,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
         * needed for the topology
         */
 
-       dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+       amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
        dev->node_props.simd_arrays_per_engine =
                cu_info.num_shader_arrays_per_engine;
 
@@ -1249,7 +1249,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
        dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
                gpu->pdev->devfn);
        dev->node_props.max_engine_clk_fcompute =
-               dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+               amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
        dev->node_props.max_engine_clk_ccompute =
                cpufreq_quick_get_max(0) / 1000;
        dev->node_props.drm_render_minor =
index dd688cfed6aa27d53b5bf134b6cc48f4750eb0ec..aa43bb253ea28c3177b610a9850fd3e3ac5a7fc5 100644 (file)
 #define FIRMWARE_RAVEN_DMCU            "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
 /* basic init/fini API */
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@ -95,7 +105,7 @@ static void
 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-                               struct amdgpu_plane *aplane,
+                               struct drm_plane *plane,
                                unsigned long possible_crtcs);
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
@@ -379,11 +389,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 
 }
 
-/*
- * Init display KMS
- *
- * Returns 0 on success
- */
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -429,6 +434,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
            adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;
 
+       if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+               init_data.flags.fbc_support = true;
+
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
 
@@ -660,6 +668,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
 static int dm_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -670,6 +698,14 @@ static int dm_hw_init(void *handle)
        return 0;
 }
 
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
 static int dm_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -895,6 +931,16 @@ static int dm_resume(void *handle)
        return ret;
 }
 
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
 static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
@@ -962,6 +1008,12 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state)
        kfree(dm_state);
 }
 
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
@@ -1524,15 +1576,23 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
        struct amdgpu_display_manager *dm = bl_get_data(bd);
 
+       /* The backlight_pwm_u16_16 parameter is an unsigned 32 bit value with
+        * 16 integer bits and 16 fractional bits, where 1.0 is the maximum
+        * backlight value. bd->props.brightness is an 8 bit value and is
+        * converted by copying the lower byte into the upper byte of a 16 bit
+        * value, i.e. scaling by 0x101.
+        */
+       uint32_t brightness = bd->props.brightness * 0x101;
+
        /*
         * PWM interprets 0 as 100% rather than 0% because of HW
-        * limitation for level 0.So limiting minimum brightness level
+        * limitation for level 0.  So limiting minimum brightness level
         * to 1.
         */
        if (bd->props.brightness < 1)
-               return 1;
+               brightness = 0x101;
+
        if (dc_link_set_backlight_level(dm->backlight_link,
-                       bd->props.brightness, 0, 0))
+                       brightness, 0, 0))
                return 0;
        else
                return 1;
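Multiplying by 0x101 replicates the 8-bit brightness into both bytes of the 16-bit integer part, so 8-bit full scale maps exactly onto 16-bit full scale. A worked sketch:

/* 8-bit -> 16-bit replication via 0x101 (low byte copied into the
 * high byte):
 *   0x01 * 0x101 = 0x0101  (the clamped minimum used above)
 *   0x80 * 0x101 = 0x8080  (mid scale)
 *   0xFF * 0x101 = 0xFFFF  (full scale, 1.0 in u16.16)
 */
static inline uint32_t brightness_8_to_16(uint8_t b)
{
	return (uint32_t)b * 0x101;
}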
@@ -1584,18 +1644,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
                             struct amdgpu_mode_info *mode_info,
                             int plane_id)
 {
-       struct amdgpu_plane *plane;
+       struct drm_plane *plane;
        unsigned long possible_crtcs;
        int ret = 0;
 
-       plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+       plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
        mode_info->planes[plane_id] = plane;
 
        if (!plane) {
                DRM_ERROR("KMS: Failed to allocate plane\n");
                return -ENOMEM;
        }
-       plane->base.type = mode_info->plane_type[plane_id];
+       plane->type = mode_info->plane_type[plane_id];
 
        /*
         * HACK: IGT tests expect that each plane can only have
@@ -1686,7 +1746,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        }
 
        for (i = 0; i < dm->dc->caps.max_streams; i++)
-               if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+               if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }
@@ -2707,18 +2767,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        drm_connector = &aconnector->base;
 
        if (!aconnector->dc_sink) {
-               /*
-                * Create dc_sink when necessary to MST
-                * Don't apply fake_sink to MST
-                */
-               if (aconnector->mst_port) {
-                       dm_dp_mst_dc_sink_create(drm_connector);
-                       return stream;
+               if (!aconnector->mst_port) {
+                       sink = create_fake_sink(aconnector);
+                       if (!sink)
+                               return stream;
                }
-
-               sink = create_fake_sink(aconnector);
-               if (!sink)
-                       return stream;
        } else {
                sink = aconnector->dc_sink;
        }
@@ -3307,7 +3360,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
 static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
-       .destroy        = drm_plane_cleanup,
+       .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
@@ -3468,49 +3521,49 @@ static const u32 cursor_formats[] = {
 };
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-                               struct amdgpu_plane *aplane,
+                               struct drm_plane *plane,
                                unsigned long possible_crtcs)
 {
        int res = -EPERM;
 
-       switch (aplane->base.type) {
+       switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                rgb_formats,
                                ARRAY_SIZE(rgb_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        case DRM_PLANE_TYPE_OVERLAY:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                yuv_formats,
                                ARRAY_SIZE(yuv_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        case DRM_PLANE_TYPE_CURSOR:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                cursor_formats,
                                ARRAY_SIZE(cursor_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        }
 
-       drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+       drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 
        /* Create (reset) the plane state */
-       if (aplane->base.funcs->reset)
-               aplane->base.funcs->reset(&aplane->base);
+       if (plane->funcs->reset)
+               plane->funcs->reset(plane);
 
 
        return res;
@@ -3521,7 +3574,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               uint32_t crtc_index)
 {
        struct amdgpu_crtc *acrtc = NULL;
-       struct amdgpu_plane *cursor_plane;
+       struct drm_plane *cursor_plane;
 
        int res = -ENOMEM;
 
@@ -3529,7 +3582,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
        if (!cursor_plane)
                goto fail;
 
-       cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+       cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
 
        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
@@ -3540,7 +3593,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                        dm->ddev,
                        &acrtc->base,
                        plane,
-                       &cursor_plane->base,
+                       cursor_plane,
                        &amdgpu_dm_crtc_funcs, NULL);
 
        if (res)
@@ -3779,12 +3832,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
-                       link->link_enc->features.ycbcr420_supported ? true : false;
+                       link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
-                       link->link_enc->features.ycbcr420_supported ? true : false;
+                       link->link_enc->features.dp_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -4542,6 +4595,14 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
        /*TODO Handle EINTR, reenable IRQ*/
 }
 
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered out anything non-kosher.
+ */
 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -5313,6 +5374,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
        struct dc_stream_update stream_update;
        enum surface_update_type update_type = UPDATE_TYPE_FAST;
 
+       if (!updates || !surface) {
+               DRM_ERROR("Plane or surface update failed to allocate");
+               /* Set type to FULL to avoid crashing in DC */
+               update_type = UPDATE_TYPE_FULL;
+               goto ret;
+       }
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -5388,6 +5455,31 @@ ret:
        return update_type;
 }
 
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, where streams are removed, added or
+ * updated on one CRTC while a flip is pending on another CRTC, acquiring the
+ * global lock guarantees that any such commit waits for completion of any
+ * outstanding flips, using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit().
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the DC sink, which is tied to the DRM connector state. Cleaning this up
+ * should be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state)
 {
@@ -5490,15 +5582,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                lock_and_validation_needed = true;
        }
 
-       /*
-        * For full updates case when
-        * removing/adding/updating streams on one CRTC while flipping
-        * on another CRTC,
-        * acquiring global lock  will guarantee that any such full
-        * update commit
-        * will wait for completion of any outstanding flip using DRMs
-        * synchronization events.
-        */
        update_type = dm_determine_update_type_for_commit(dc, state);
 
        if (overall_update_type < update_type)
index 978b34a5011ce508055064658b556ef5082097e7..d6960644d71413c695a9e45f6f4cd19dd98a947f 100644 (file)
@@ -59,49 +59,100 @@ struct common_irq_params {
        enum dc_irq_source irq_src;
 };
 
+/**
+ * struct irq_list_head - Linked-list for low context IRQ handlers.
+ *
+ * @head: The list_head within &struct handler_data
+ * @work: A work_struct containing the deferred handler work
+ */
 struct irq_list_head {
        struct list_head head;
        /* In case this interrupt needs post-processing, 'work' will be queued*/
        struct work_struct work;
 };
 
+/**
+ * struct dm_comressor_info - Buffer info used by frame buffer compression
+ * @cpu_addr: MMIO CPU address
+ * @bo_ptr: Pointer to the buffer object
+ * @gpu_addr: MMIO GPU address
+ */
 struct dm_comressor_info {
        void *cpu_addr;
        struct amdgpu_bo *bo_ptr;
        uint64_t gpu_addr;
 };
 
+/**
+ * struct amdgpu_display_manager - Central amdgpu display manager device
+ *
+ * @dc: Display Core control structure
+ * @adev: AMDGPU base driver structure
+ * @ddev: DRM base driver structure
+ * @display_indexes_num: Max number of display streams supported
+ * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
+ * @backlight_dev: Backlight control device
+ * @cached_state: Caches device atomic state for suspend/resume
+ * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ */
 struct amdgpu_display_manager {
+
        struct dc *dc;
+
+       /**
+        * @cgs_device:
+        *
+        * The Common Graphics Services device. It provides an interface for
+        * accessing registers.
+        */
        struct cgs_device *cgs_device;
 
-       struct amdgpu_device *adev;     /*AMD base driver*/
-       struct drm_device *ddev;        /*DRM base driver*/
+       struct amdgpu_device *adev;
+       struct drm_device *ddev;
        u16 display_indexes_num;
 
-       /*
-        * 'irq_source_handler_table' holds a list of handlers
-        * per (DAL) IRQ source.
+       /**
+        * @irq_handler_list_low_tab:
+        *
+        * Low priority IRQ handler table.
         *
-        * Each IRQ source may need to be handled at different contexts.
-        * By 'context' we mean, for example:
-        * - The ISR context, which is the direct interrupt handler.
-        * - The 'deferred' context - this is the post-processing of the
-        *      interrupt, but at a lower priority.
+        * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
+        * source. Low priority IRQ handlers are deferred to a workqueue to be
+        * processed. Hence, they can sleep.
         *
         * Note that handlers are called in the same order as they were
         * registered (FIFO).
         */
        struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+
+       /**
+        * @irq_handler_list_high_tab:
+        *
+        * High priority IRQ handler table.
+        *
+        * It is an n*m table, same as &irq_handler_list_low_tab. However,
+        * handlers in this table are not deferred and are called immediately.
+        */
        struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
 
+       /**
+        * @pflip_params:
+        *
+        * Page flip IRQ parameters, passed to registered handlers when
+        * triggered.
+        */
        struct common_irq_params
        pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
 
+       /**
+        * @vblank_params:
+        *
+        * Vertical blanking IRQ parameters, passed to registered handlers when
+        * triggered.
+        */
        struct common_irq_params
        vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
 
-       /* this spin lock synchronizes access to 'irq_handler_list_table' */
        spinlock_t irq_handler_list_table_lock;
 
        struct backlight_device *backlight_dev;
@@ -110,9 +161,6 @@ struct amdgpu_display_manager {
 
        struct mod_freesync *freesync_module;
 
-       /**
-        * Caches device atomic state for suspend/resume
-        */
        struct drm_atomic_state *cached_state;
 
        struct dm_comressor_info compressor;
@@ -160,8 +208,6 @@ struct amdgpu_dm_connector {
        struct mutex hpd_lock;
 
        bool fake_enable;
-
-       bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
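The low-context table documented above is initialized with the standard list/workqueue APIs; a minimal sketch, assuming adev->dm is the embedded &struct amdgpu_display_manager (dm_irq_work_func is the deferred handler defined in amdgpu_dm_irq.c):

	struct irq_list_head *lst = &adev->dm.irq_handler_list_low_tab[src];

	INIT_LIST_HEAD(&lst->head);
	INIT_WORK(&lst->work, dm_irq_work_func);

	/* Later, from interrupt context: defer the low-context handlers
	 * to a workqueue so they may sleep. */
	schedule_work(&lst->work);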
index be19e6861189c63e0fe85d16ad1a2bedc14cf58c..216e48cec71664f67eb7325b3dd0fc876d228a6a 100644 (file)
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
         */
        stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
        ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
-                                                gamma, true, adev->asic_type <= CHIP_RAVEN);
+                                                gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
        dc_gamma_release(&gamma);
        if (!ret) {
                stream->out_transfer_func->type = old_type;
index 01fc5717b657fb73ecb5b5d41ca03d511ba358a3..f088ac58597803d3c257ccdeaea656eb0dfd723f 100644 (file)
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                return -EINVAL;
        }
 
+       if (!stream_state) {
+               DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+               return -EINVAL;
+       }
+
        /* When enabling CRC, we should also disable dithering. */
        if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
                if (dc_stream_configure_crc(stream_state->ctx->dc,
index a212178f2edc21906e0609cdcc9d7a836bbee8b3..cd10f77cdeb062f57d646400d1ce63a23771cd59 100644 (file)
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_irq.h"
 
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This is something that could be cleaned up, and is a
+ * future TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback
+ * amdgpu_irq_handler() that DRM calls when an interrupt fires. This generic
+ * handler looks up the IRQ table and calls the respective
+ * &amdgpu_irq_src_funcs.process hooks.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom-half implementing workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen
+ * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
+ * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up
+ * DM's IRQ tables. However, in order for the base driver to recognize this
+ * hook, DM still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. The base driver calls these through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
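A hedged sketch of the hookup just described: one &struct amdgpu_irq_src_funcs table per IRQ source type, with .process pointed at DM's generic handler (the HPD variant is visible later in this file via adev->hpd_irq.funcs; the .set hook name here is illustrative):

	static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
		.set = amdgpu_dm_set_hpd_irq_state,	/* toggles DC's interrupt */
		.process = amdgpu_dm_irq_handler,	/* walks DM's IRQ tables */
	};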
 /******************************************************************************
  * Private declarations.
  *****************************************************************************/
 
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
+ *
+ * @list: Linked list entry referencing the next/previous handler
+ * @handler: Handler function
+ * @handler_arg: Argument passed to the handler when triggered
+ * @dm: DM which this handler belongs to
+ * @irq_source: DC interrupt source that this handler is registered for
+ */
 struct amdgpu_dm_irq_handler_data {
        struct list_head list;
        interrupt_handler handler;
        void *handler_arg;
 
-       /* DM which this handler belongs to */
        struct amdgpu_display_manager *dm;
        /* DAL irq source which registered for this interrupt. */
        enum dc_irq_source irq_source;
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
 }
 
 /**
- * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
  *
  * @work: work struct
  */
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work)
         * (The most common use is HPD interrupt) */
 }
 
-/**
- * Remove a handler and return a pointer to hander list from which the
+/*
+ * Remove a handler and return a pointer to handler list from which the
  * handler was removed.
  */
 static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
  * Note: caller is responsible for input validation.
  *****************************************************************************/
 
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context handlers run from a
+ * workqueue, allowing operations that sleep.
+ *
+ * Registered handlers are called in a FIFO manner, i.e. the earliest
+ * registered handler is called first.
+ *
+ * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
+ *         source, handler function, and args
+ */
 void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
                                       struct dc_interrupt_params *int_params,
                                       void (*ih)(void *),
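A hedged usage sketch, modeled on how DM registers its own HPD handlers (cf. register_hpd_handlers() in amdgpu_dm.c; the polarity fields are omitted for brevity):

	struct dc_interrupt_params int_params = {0};

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 handle_hpd_irq, (void *)aconnector);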
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
        return handler_data;
 }
 
+/**
+ * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ * @irq_source: IRQ source to remove the given handler from
+ * @ih: Function pointer to the interrupt handler to unregister
+ *
+ * Go through both low and high context IRQ tables, and find the given handler
+ * for the given irq source. If found, remove it. Otherwise, do nothing.
+ */
 void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
                                        enum dc_irq_source irq_source,
                                        void *ih)
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
        }
 }
 
+/**
+ * amdgpu_dm_irq_init() - Initialize DM IRQ management
+ * @adev:  The base driver device containing the DM device
+ *
+ * Initialize DM's high and low context IRQ tables.
+ *
+ * The n*m table contains n IRQ sources, each with m
+ * &struct amdgpu_dm_irq_handler_data entries hooked together in a linked list.
+ * The list_heads are initialized here. When interrupt n is triggered, all of
+ * its m handlers are called in sequence, FIFO according to registration order.
+ *
+ * The low context table requires special steps to initialize, since handlers
+ * will be deferred to a workqueue. See &struct irq_list_head.
+ */
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
        int src;
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
        return 0;
 }
 
-/* DM IRQ and timer resource release */
+/**
+ * amdgpu_dm_irq_fini() - Tear down DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Flush all work within the low context IRQ table.
+ */
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
        int src;
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
        return 0;
 }
 
-/**
+/*
  * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
  * "irq_source".
  */
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 
 }
 
-/** amdgpu_dm_irq_immediate_work
- *  Callback high irq work immediately, don't send to work queue
+/*
+ * amdgpu_dm_irq_immediate_work
+ * Call high-context IRQ handlers immediately instead of deferring them to a
+ * workqueue.
  */
 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
                                         enum dc_irq_source irq_source)
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 }
 
-/*
- * amdgpu_dm_irq_handler
+/**
+ * amdgpu_dm_irq_handler() - Generic DM IRQ handler
+ * @adev: amdgpu base driver device containing the DM device
+ * @source: Unused
+ * @entry: Data about the triggered interrupt
  *
- * Generic IRQ handler, calls all registered high irq work immediately, and
- * schedules work for low irq
+ * Calls all registered high-context handlers immediately and schedules
+ * deferred work for the low-context handlers. The DM IRQ tables are used to
+ * find the corresponding handlers.
  */
 static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
        adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
 }
 
-/*
+/**
  * amdgpu_dm_hpd_init - hpd setup callback.
  *
  * @adev: amdgpu_device pointer
index 03601d717fed90708463fca143a6de50d6b750b1..d02c32a1039c02b97f02d721030ae82e546799df 100644 (file)
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property
 };
 
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       struct dc_sink *dc_sink;
-       struct dc_sink_init_data init_params = {
-                       .link = aconnector->dc_link,
-                       .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
-       /* FIXME none of this is safe. we shouldn't touch aconnector here in
-        * atomic_check
-        */
-
-       /*
-        * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
-        */
-       if (!aconnector->port || !aconnector->port->aux.ddc.algo)
-               return;
-
-       ASSERT(aconnector->edid);
-
-       dc_sink = dc_link_add_remote_sink(
-               aconnector->dc_link,
-               (uint8_t *)aconnector->edid,
-               (aconnector->edid->extensions + 1) * EDID_LENGTH,
-               &init_params);
-
-       dc_sink->priv = aconnector;
-       aconnector->dc_sink = dc_sink;
-
-       if (aconnector->dc_sink)
-               amdgpu_dm_update_freesync_caps(
-                               connector, aconnector->edid);
-}
-
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder;
        struct drm_encoder *encoder;
-       const struct drm_connector_helper_funcs *connector_funcs =
-               connector->base.helper_private;
-       struct drm_encoder *enc_master =
-               connector_funcs->best_encoder(&connector->base);
 
-       DRM_DEBUG_KMS("enc master is %p\n", enc_master);
        amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
        if (!amdgpu_encoder)
                return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               aconnector = to_amdgpu_dm_connector(connector);
-               if (aconnector->mst_port == master
-                               && !aconnector->port) {
-                       DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
-                                               aconnector, connector->base.id, aconnector->mst_port);
-
-                       aconnector->port = port;
-                       drm_connector_set_path_property(connector, pathprop);
-
-                       drm_connector_list_iter_end(&conn_iter);
-                       aconnector->mst_connected = true;
-                       return &aconnector->base;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
 
        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
-       aconnector->mst_connected = true;
-
        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                        aconnector, connector->base.id, aconnector->mst_port);
 
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_connector *connector)
 {
+       struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+       struct drm_device *dev = master->base.dev;
+       struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                aconnector->dc_sink = NULL;
        }
 
-       aconnector->mst_connected = false;
+       drm_connector_unregister(connector);
+       if (adev->mode_info.rfbdev)
+               drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+       drm_connector_put(connector);
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
        drm_kms_helper_hotplug_event(dev);
 }
 
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
-       mutex_lock(&connector->dev->mode_config.mutex);
-       drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
-       mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        if (adev->mode_info.rfbdev)
                drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
                DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
 
        drm_connector_register(connector);
-
-       if (aconnector->mst_connected)
-               dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
index 8cf51da26657e29e72062b34aeed7e5d827f9e21..2da851b40042aee9b79eb2c666d45c0f5061fee0 100644 (file)
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
index 12001a006b2d8e1d0b5f3734c189e9faf23d94e5..9d2d6986b98394be67d038f0d0abdff2ef1cef83 100644 (file)
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
                return;
 
        clock.clock_type = amd_pp_dcf_clock;
-       clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+       clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
        pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 
        clock.clock_type = amd_pp_f_clock;
-       clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+       clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
        pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
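The interface change above moves the pp_smu request fields from kHz to MHz while powerplay keeps consuming kHz, hence the * 1000 conversions. For example (values illustrative):

	req->hard_min_dcefclk_mhz = 600;
	clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000; /* 600000 kHz */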
 
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
                        wm_dce_clocks[i].wm_set_id =
                                        ranges->reader_wm_sets[i].wm_inst;
                wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-                               ranges->reader_wm_sets[i].max_drain_clk_khz;
+                               ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
                wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-                               ranges->reader_wm_sets[i].min_drain_clk_khz;
+                               ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
                wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-                               ranges->reader_wm_sets[i].max_fill_clk_khz;
+                               ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
                wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-                               ranges->reader_wm_sets[i].min_fill_clk_khz;
+                               ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
        }
 
        for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
                        wm_soc_clocks[i].wm_set_id =
                                        ranges->writer_wm_sets[i].wm_inst;
                wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-                               ranges->writer_wm_sets[i].max_fill_clk_khz;
+                               ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
                wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-                               ranges->writer_wm_sets[i].min_fill_clk_khz;
+                               ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
                wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-                               ranges->writer_wm_sets[i].max_drain_clk_khz;
+                               ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
                wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-                               ranges->writer_wm_sets[i].min_drain_clk_khz;
+                               ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
        }
 
        pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
index 0e1dc1b1a48d9af59129e207d5b6ca1a1baf11c8..c2ab026aee91f31d22970d29c169f61ef0eff8a0 100644 (file)
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
 static struct device_id device_type_from_device_id(uint16_t device_id)
 {
 
-       struct device_id result_device_id;
+       struct device_id result_device_id = {0};
 
        switch (device_id) {
        case ATOM_DEVICE_LCD1_SUPPORT:
index ff764da21b6ffd9cbf2a86e7543e8e09069189bf..751bb614fc0eae6b8b26647bc4f31b0893562431 100644 (file)
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
        .is_accelerated_mode = bios_parser_is_accelerated_mode,
 
+       .is_active_display = bios_is_active_display,
+
        .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
 
 
index d4589470985c72cc5d649ad9e615c815ee0ceda7..fdda8aa8e3031f7f667da7e931d34de8f849f847 100644 (file)
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays(
        return active_disp;
 }
 
+bool bios_is_active_display(
+               struct dc_bios *bios,
+               enum signal_type signal,
+               const struct connector_device_tag_info *device_tag)
+{
+       uint32_t active = 0;
+       uint32_t connected = 0;
+       uint32_t bios_scratch_0 = 0;
+       uint32_t bios_scratch_3 = 0;
+
+       switch (signal) {
+       case SIGNAL_TYPE_DVI_SINGLE_LINK:
+       case SIGNAL_TYPE_DVI_DUAL_LINK:
+       case SIGNAL_TYPE_HDMI_TYPE_A:
+       case SIGNAL_TYPE_DISPLAY_PORT:
+       case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               {
+                       if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
+                               switch (device_tag->dev_id.enum_id)     {
+                               case 1:
+                                       {
+                                               active    = ATOM_S3_DFP1_ACTIVE;
+                                               connected = 0x0008;     //ATOM_DISPLAY_DFP1_CONNECT
+                                       }
+                                       break;
+
+                               case 2:
+                                       {
+                                               active    = ATOM_S3_DFP2_ACTIVE;
+                                               connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT
+                                       }
+                                       break;
+
+                               case 3:
+                                       {
+                                               active    = ATOM_S3_DFP3_ACTIVE;
+                                               connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT
+                                       }
+                                       break;
+
+                               case 4:
+                                       {
+                                               active    = ATOM_S3_DFP4_ACTIVE;
+                                               connected = 0x0400;     //ATOM_DISPLAY_DFP4_CONNECT
+                                       }
+                                       break;
+
+                               case 5:
+                                       {
+                                               active    = ATOM_S3_DFP5_ACTIVE;
+                                               connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT
+                                       }
+                                       break;
+
+                               case 6:
+                                       {
+                                               active    = ATOM_S3_DFP6_ACTIVE;
+                                               connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT
+                                       }
+                                       break;
+
+                               default:
+                                       break;
+                               }
+                               }
+                       }
+                       break;
+
+       case SIGNAL_TYPE_LVDS:
+       case SIGNAL_TYPE_EDP:
+               {
+                       active    = ATOM_S3_LCD1_ACTIVE;
+                       connected = 0x0002;     //ATOM_DISPLAY_LCD1_CONNECT
+               }
+               break;
+
+       default:
+               break;
+       }
+
+
+       if (bios->regs->BIOS_SCRATCH_0) /* follow up with other ASICs, todo */
+               bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
+       if (bios->regs->BIOS_SCRATCH_3) /* follow up with other ASICs, todo */
+               bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
+
+       bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
+       if ((active & bios_scratch_3) && (connected & bios_scratch_0))
+               return true;
+
+       return false;
+}
+
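A hedged sketch of how a caller might reach the new helper once it is exposed through the vbios function table (hooked up in the bios_parser hunk above); device_tag here is assumed to be the per-link connector info DC already tracks:

	bool active = false;

	if (bios->funcs->is_active_display)
		active = bios->funcs->is_active_display(bios,
				link->connector_signal, &link->device_tag);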
index 75a29e68fb2782ad667f858b4f235aeccdcc8ace..f33cac2147e32cc182d33aefabc09422be1af28b 100644 (file)
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
 void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
 void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
 uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
+bool bios_is_active_display(
+       struct dc_bios *bios,
+       enum signal_type signal,
+       const struct connector_device_tag_info *device_tag);
 
 #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
 
index 3208188b7ed48208ec97c1d391e8637270bd693d..43e4a2be0fa677f9630799826c3fcb2b6adae477 100644 (file)
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
        ranges.num_reader_wm_sets = WM_SET_COUNT;
        ranges.num_writer_wm_sets = WM_SET_COUNT;
        ranges.reader_wm_sets[0].wm_inst = WM_A;
-       ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
-       ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
-       ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
-       ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+       ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
+       ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
+       ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
+       ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
        ranges.writer_wm_sets[0].wm_inst = WM_A;
-       ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
-       ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
-       ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
-       ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+       ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
+       ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
+       ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
+       ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
 
        if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
                ranges.reader_wm_sets[0].wm_inst = WM_A;
-               ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
-               ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
-               ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
-               ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+               ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
+               ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
+               ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
+               ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
                ranges.writer_wm_sets[0].wm_inst = WM_A;
-               ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
-               ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
-               ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
-               ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+               ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
+               ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
+               ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
+               ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
        }
 
        ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
index 7c491c91465fc5a86973931cd11abe7808f1d8b8..3279e26c344074d3e6333d3c3cb1ef91a841038b 100644 (file)
@@ -391,9 +391,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
                                == stream) {
 
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
-                       dc->hwss.program_csc_matrix(pipes,
-                       stream->output_color_space,
-                       stream->csc_color_matrix.matrix);
+                       dc->hwss.program_output_csc(dc,
+                                       pipes,
+                                       stream->output_color_space,
+                                       stream->csc_color_matrix.matrix,
+                                       pipes->plane_res.hubp->opp_id);
                        ret = true;
                }
        }
@@ -941,7 +943,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        if (!dcb->funcs->is_accelerated_mode(dcb))
                dc->hwss.enable_accelerated_mode(dc, context);
 
-       dc->hwss.set_bandwidth(dc, context, false);
+       dc->hwss.prepare_bandwidth(dc, context);
 
        /* re-program planes for existing stream, in case we need to
         * free up plane resource for later use
@@ -957,8 +959,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        }
 
        /* Program hardware */
-       dc->hwss.ready_shared_resources(dc, context);
-
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];
                dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
@@ -1012,7 +1012,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
        /* pplib is notified if disp_num changed */
-       dc->hwss.set_bandwidth(dc, context, true);
+       dc->hwss.optimize_bandwidth(dc, context);
 
        dc_release_state(dc->current_state);
 
@@ -1020,8 +1020,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
        dc_retain_state(dc->current_state);
 
-       dc->hwss.optimize_shared_resources(dc);
-
        return result;
 }
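This series splits the old set_bandwidth(dc, context, decrease_allowed) hook into an up-front and a trailing call; a sketch of the resulting commit-time pattern, using the hook names introduced above:

	/* Raise clocks before any pipes are touched... */
	dc->hwss.prepare_bandwidth(dc, context);

	/* ...program streams and planes... */

	/* ...then trim clocks once the new state is live. */
	dc->hwss.optimize_bandwidth(dc, context);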
 
@@ -1063,7 +1061,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
        dc->optimized_required = false;
 
-       dc->hwss.set_bandwidth(dc, context, true);
+       dc->hwss.optimize_bandwidth(dc, context);
        return true;
 }
 
@@ -1369,35 +1367,6 @@ static struct dc_stream_status *stream_get_status(
 
 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
 
-static void notify_display_count_to_smu(
-               struct dc *dc,
-               struct dc_state *context)
-{
-       int i, display_count;
-       struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
-       /*
-        * if function pointer not set up, this message is
-        * sent as part of pplib_apply_display_requirements.
-        * So just return.
-        */
-       if (!pp_smu || !pp_smu->set_display_count)
-               return;
-
-       display_count = 0;
-       for (i = 0; i < context->stream_count; i++) {
-               const struct dc_stream_state *stream = context->streams[i];
-
-               /* only notify active stream */
-               if (stream->dpms_off)
-                       continue;
-
-               display_count++;
-       }
-
-       pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-}
-
 static void commit_planes_do_stream_update(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_stream_update *stream_update,
@@ -1422,7 +1391,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
                                        stream_update->adjust->v_total_max);
 
                        if (stream_update->periodic_fn_vsync_delta &&
-                                       pipe_ctx->stream_res.tg &&
                                        pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
                                pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
                                        pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
@@ -1448,19 +1416,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
                        if (stream_update->dpms_off) {
                                if (*stream_update->dpms_off) {
                                        core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
-                                       dc->hwss.pplib_apply_display_requirements(
-                                               dc, dc->current_state);
-                                       notify_display_count_to_smu(dc, dc->current_state);
+                                       dc->hwss.optimize_bandwidth(dc, dc->current_state);
                                } else {
-                                       dc->hwss.pplib_apply_display_requirements(
-                                               dc, dc->current_state);
-                                       notify_display_count_to_smu(dc, dc->current_state);
+                                       dc->hwss.prepare_bandwidth(dc, dc->current_state);
                                        core_link_enable_stream(dc->current_state, pipe_ctx);
                                }
                        }
 
-
-
                        if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
                                if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
                                        // if otg funcs defined check if blanked before programming
@@ -1487,7 +1449,7 @@ static void commit_planes_for_stream(struct dc *dc,
        struct pipe_ctx *top_pipe_to_program = NULL;
 
        if (update_type == UPDATE_TYPE_FULL) {
-               dc->hwss.set_bandwidth(dc, context, false);
+               dc->hwss.prepare_bandwidth(dc, context);
                context_clock_trace(dc, context);
        }
 
index e1ebdf7b5eaf3080882e5c3ea99de0278d9acdb0..73d0495066189f990976d0e1cfbd9d9cea29f1dc 100644 (file)
@@ -311,7 +311,7 @@ void context_timing_trace(
 {
        int i;
        struct dc  *core_dc = dc;
-       int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+       int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
        struct crtc_position position;
        unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
        DC_LOGGER_INIT(dc->ctx->logger);
@@ -322,8 +322,7 @@ void context_timing_trace(
                /* get_position() returns CRTC vertical/horizontal counter
                 * hence not applicable for underlay pipe
                 */
-               if (pipe_ctx->stream == NULL
-                                || pipe_ctx->pipe_idx == underlay_idx)
+               if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
                        continue;
 
                pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
@@ -333,7 +332,7 @@ void context_timing_trace(
        for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-               if (pipe_ctx->stream == NULL)
+               if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
                        continue;
 
                TIMING_TRACE("OTG_%d   H_tot:%d  V_tot:%d   H_pos:%d  V_pos:%d\n",
index fb04a4ad141fdb68f68a747f6c4474a15e7da8a2..7ee9c033acbda70f302bdb5367683c7cd8823e2f 100644 (file)
@@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp(
        struct dc_link *link = stream->sink->link;
        struct dc_link_settings link_settings = {0};
        enum dp_panel_mode panel_mode;
-       enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
 
        /* get link settings for video mode timing */
        decide_link_settings(stream, &link_settings);
 
-       /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
-        * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
-        */
-       if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-               max_link_rate = LINK_RATE_HIGH3;
-
-       if (link_settings.link_rate == max_link_rate) {
-               struct dc_clocks clocks = state->bw.dcn.clk;
-
-               /* dce/dcn compat, do not update dispclk */
-               clocks.dispclk_khz = 0;
-               /* 27mhz = 27000000hz= 27000khz */
-               clocks.phyclk_khz = link_settings.link_rate * 27000;
-
-               state->dis_clk->funcs->update_clocks(
-                               state->dis_clk, &clocks, false);
-       }
+       pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+                       link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+       state->dccg->funcs->update_clocks(state->dccg, state, false);
 
        dp_enable_link_phy(
                link,
@@ -1722,7 +1707,7 @@ static void write_i2c_retimer_setting(
                i2c_success = i2c_write(pipe_ctx, slave_address,
                                buffer, sizeof(buffer));
                RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-                       offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+                       offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
                        slave_address, buffer[0], buffer[1], i2c_success?1:0);
                if (!i2c_success)
                        /* Write failure */
@@ -1734,7 +1719,7 @@ static void write_i2c_retimer_setting(
                i2c_success = i2c_write(pipe_ctx, slave_address,
                                buffer, sizeof(buffer));
                RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-                       offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+                       offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
                        slave_address, buffer[0], buffer[1], i2c_success?1:0);
                if (!i2c_success)
                        /* Write failure */
@@ -2156,14 +2141,16 @@ int dc_link_get_backlight_level(const struct dc_link *link)
 {
        struct abm *abm = link->ctx->dc->res_pool->abm;
 
-       if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+       if (abm == NULL || abm->funcs->get_current_backlight == NULL)
                return DC_ERROR_UNEXPECTED;
 
-       return (int) abm->funcs->get_current_backlight_8_bit(abm);
+       return (int) abm->funcs->get_current_backlight(abm);
 }
 
-bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
-               uint32_t frame_ramp, const struct dc_stream_state *stream)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp,
+               const struct dc_stream_state *stream)
 {
        struct dc  *core_dc = link->ctx->dc;
        struct abm *abm = core_dc->res_pool->abm;
@@ -2175,19 +2162,17 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 
        if ((dmcu == NULL) ||
                (abm == NULL) ||
-               (abm->funcs->set_backlight_level == NULL))
+               (abm->funcs->set_backlight_level_pwm == NULL))
                return false;
 
-       if (stream) {
-               if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
-                       frame_ramp = 0;
-
-               ((struct dc_stream_state *)stream)->bl_pwm_level = level;
-       }
+       if (stream)
+               ((struct dc_stream_state *)stream)->bl_pwm_level =
+                               backlight_pwm_u16_16;
 
        use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
+       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+                       backlight_pwm_u16_16, backlight_pwm_u16_16);
 
        if (dc_is_embedded_signal(link->connector_signal)) {
                if (stream != NULL) {
@@ -2204,9 +2189,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
                                                1;
                        }
                }
-               abm->funcs->set_backlight_level(
+               abm->funcs->set_backlight_level_pwm(
                                abm,
-                               level,
+                               backlight_pwm_u16_16,
                                frame_ramp,
                                controller_id,
                                use_smooth_brightness);
@@ -2220,7 +2205,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
        struct dc  *core_dc = link->ctx->dc;
        struct abm *abm = core_dc->res_pool->abm;
 
-       if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+       if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
                return false;
 
        abm->funcs->set_abm_immediate_disable(abm);
@@ -2609,6 +2594,10 @@ void core_link_enable_stream(
                core_dc->hwss.unblank_stream(pipe_ctx,
                        &pipe_ctx->stream->sink->link->cur_link_settings);
 
+               dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
+                               pipe_ctx->stream->bl_pwm_level,
+                               0,
+                               pipe_ctx->stream);
        }
 
 }
index b6fe29b9fb65730ed44fe5f76c12f92605a5dfe0..fc65b005516723102ac7f34815b400662fb2c9b1 100644 (file)
@@ -499,8 +499,13 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
        bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
 
+
        /*
-        * Need to calculate the scan direction for viewport to properly determine offset
+        * We need to take horizontal mirror into account. On an unrotated surface
+        * this means that the viewport offset is actually the offset from the other
+        * side of the source image, so we have to subtract the right edge of the
+        * viewport from the right edge of the source window. Similarly to mirror,
+        * we need to take into account how the offset is affected by 180/270
+        * rotations.
         */
        if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
                flip_vert_scan_dir = true;
@@ -510,6 +515,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
        else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
                flip_horz_scan_dir = true;
 
+       if (pipe_ctx->plane_state->horizontal_mirror)
+               flip_horz_scan_dir = !flip_horz_scan_dir;
+
        if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
                stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
                pri_split = false;
@@ -540,45 +548,27 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
                        plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
 
        /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+        * note: surf_src.ofs should be added after rotation/mirror offset direction
+        *       adjustment since it is already in viewport space
         * num_pixels = clip.num_pix * scl_ratio
         */
-       data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+       data->viewport.x = (clip.x - plane_state->dst_rect.x) *
                        surf_src.width / plane_state->dst_rect.width;
        data->viewport.width = clip.width *
                        surf_src.width / plane_state->dst_rect.width;
 
-       data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+       data->viewport.y = (clip.y - plane_state->dst_rect.y) *
                        surf_src.height / plane_state->dst_rect.height;
        data->viewport.height = clip.height *
                        surf_src.height / plane_state->dst_rect.height;
 
-       /* To transfer the x, y to correct coordinate on mirror image (camera).
-        * deg  0 : transfer x,
-        * deg 90 : don't need to transfer,
-        * deg180 : transfer y,
-        * deg270 : transfer x and y.
-        * To transfer the x, y to correct coordinate on non-mirror image (video).
-        * deg  0 : don't need to transfer,
-        * deg 90 : transfer y,
-        * deg180 : transfer x and y,
-        * deg270 : transfer x.
-        */
-       if (pipe_ctx->plane_state->horizontal_mirror) {
-               if (flip_horz_scan_dir && !flip_vert_scan_dir) {
-                       data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-                       data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-               } else if (flip_horz_scan_dir && flip_vert_scan_dir)
-                       data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-               else {
-                       if (!flip_horz_scan_dir && !flip_vert_scan_dir)
-                               data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-               }
-       } else {
-               if (flip_horz_scan_dir)
-                       data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-               if (flip_vert_scan_dir)
-                       data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-       }
+       if (flip_vert_scan_dir)
+               data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
+       if (flip_horz_scan_dir)
+               data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+
+       data->viewport.x += surf_src.x;
+       data->viewport.y += surf_src.y;
 
        /* Round down, compensate in init */
        data->viewport_c.x = data->viewport.x / vpc_div;
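A worked instance of the mirrored-offset rule above, as a hedged helper (numbers illustrative): for a 1920-wide source window with viewport.x = 100 and width = 800, mirroring yields 1920 - 100 - 800 = 1020, and surf_src.x is added back afterwards.

	static inline int mirrored_viewport_x(int surf_width, int x, int width)
	{
		return surf_width - x - width;	/* 1920 - 100 - 800 = 1020 */
	}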
@@ -773,22 +763,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r
        else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
                flip_horz_scan_dir = true;
 
+       if (pipe_ctx->plane_state->horizontal_mirror)
+               flip_horz_scan_dir = !flip_horz_scan_dir;
+
        if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
                        pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
                rect_swap_helper(&src);
                rect_swap_helper(&data->viewport_c);
                rect_swap_helper(&data->viewport);
-
-               if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
-                       pipe_ctx->plane_state->horizontal_mirror) {
-                       flip_vert_scan_dir = true;
-               }
-               if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
-                       pipe_ctx->plane_state->horizontal_mirror) {
-                       flip_vert_scan_dir = false;
-               }
-       } else if (pipe_ctx->plane_state->horizontal_mirror)
-                       flip_horz_scan_dir = !flip_horz_scan_dir;
+       }
 
        /*
         * Init calculated according to formula:
@@ -1115,9 +1098,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
                        pipe_ctx->plane_state->format);
 
-       if (pipe_ctx->stream->timing.flags.INTERLACE)
-               pipe_ctx->stream->dst.height *= 2;
-
        calculate_scaling_ratios(pipe_ctx);
 
        calculate_viewport(pipe_ctx);
@@ -1138,9 +1118,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
        pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
-       if (pipe_ctx->stream->timing.flags.INTERLACE)
-               pipe_ctx->plane_res.scl_data.v_active *= 2;
-
 
        /* Taps calculations */
        if (pipe_ctx->plane_res.xfm != NULL)
@@ -1185,9 +1162,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
                                plane_state->dst_rect.x,
                                plane_state->dst_rect.y);
 
-       if (pipe_ctx->stream->timing.flags.INTERLACE)
-               pipe_ctx->stream->dst.height /= 2;
-
        return res;
 }
 
@@ -2071,7 +2045,7 @@ void dc_resource_state_construct(
                const struct dc *dc,
                struct dc_state *dst_ctx)
 {
-       dst_ctx->dis_clk = dc->res_pool->dccg;
+       dst_ctx->dccg = dc->res_pool->clk_mgr;
 }
 
 enum dc_status dc_validate_global_state(
index 2ac848a106bafc2a4588bcca1b02c23d409757a9..e113439aaa86f5597231f11644064e60279cda20 100644 (file)
@@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream,
 
        stream->out_transfer_func = dc_create_transfer_func();
        stream->out_transfer_func->type = TF_TYPE_BYPASS;
+       stream->out_transfer_func->ctx = stream->ctx;
 }
 
 static void destruct(struct dc_stream_state *stream)
index 8fb3aefd195ca3e384babcac8e002106f5cb9e1a..c60c9b4c307531e118ecd4f986985dfa4388ad00 100644 (file)
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
 
        plane_state->in_transfer_func = dc_create_transfer_func();
        plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+       plane_state->in_transfer_func->ctx = ctx;
 }
 
 static void destruct(struct dc_plane_state *plane_state)
index 199527171100b0ed7cbd34aae7468e0989efc9a6..d16a20c8479201515cc70fd344df96217e654f41 100644 (file)
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.68"
+#define DC_VER "3.2.04"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -169,6 +169,7 @@ struct link_training_settings;
 struct dc_config {
        bool gpu_vm_support;
        bool disable_disp_pll_sharing;
+       bool fbc_support;
 };
 
 enum visual_confirm {
@@ -249,8 +250,6 @@ struct dc_debug_options {
        bool disable_dmcu;
        bool disable_psr;
        bool force_abm_enable;
-       bool disable_hbup_pg;
-       bool disable_dpp_pg;
        bool disable_stereo_support;
        bool vsr_support;
        bool performance_trace;
@@ -304,11 +303,6 @@ struct dc {
        struct hw_sequencer_funcs hwss;
        struct dce_hwseq *hwseq;
 
-       /* temp store of dm_pp_display_configuration
-        * to compare to see if display config changed
-        */
-       struct dm_pp_display_configuration prev_display_config;
-
        bool optimized_required;
 
        /* FBC compressor */
index 8130b95ccc5349a18856cc0fd8b426444ef27ad0..a8b3cedf943171fbcdfbb9848f07e9edd5fa1875 100644 (file)
@@ -86,6 +86,10 @@ struct dc_vbios_funcs {
 
        bool (*is_accelerated_mode)(
                struct dc_bios *bios);
+       bool (*is_active_display)(
+               struct dc_bios *bios,
+               enum signal_type signal,
+               const struct connector_device_tag_info *device_tag);
        void (*set_scratch_critical_state)(
                struct dc_bios *bios,
                bool state);
@@ -141,6 +145,7 @@ struct dc_vbios_funcs {
 };
 
 struct bios_registers {
+       uint32_t BIOS_SCRATCH_0;
        uint32_t BIOS_SCRATCH_3;
        uint32_t BIOS_SCRATCH_6;
 };
index 3bfdccceb524427c40ac0a2b063710c2bb66bbe3..8738f27a87088d26a0bca47ebc9087f986037117 100644 (file)
@@ -138,9 +138,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
        return dc->links[link_index];
 }
 
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
-               uint32_t frame_ramp, const struct dc_stream_state *stream);
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is an unsigned 32-bit value in U16.16 fixed point
+ * (16 integer bits, 16 fractional bits), where 1.0 is the maximum backlight
+ * value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp,
+               const struct dc_stream_state *stream);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
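A hedged sketch of producing the new u16.16 value from a classic 8-bit brightness level; the math mirrors the 8-bit conversion removed from dce_abm.c below (0xFF maps to 0x10000, i.e. 1.0):

	static inline uint32_t backlight_8bit_to_pwm_u16_16(uint8_t level)
	{
		/* Multiply by 0x10101 (~256/255) for a 24-bit value, then
		 * round to 17 significant bits: 0xEF -> 0xEFEFEF -> 0xEFF0. */
		uint32_t v = level * 0x10101;

		return (v >> 8) + ((v >> 7) & 1);
	}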
 
index 8f7f0e8b341f234a0a30207718d111687bedc743..6d7b64a743cab77c98ff8595e702884cd090b659 100644 (file)
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
 dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
index 29294db1a96b775013635d080725e392cda4766b..2a342eae80fd2bd1cab42f25740a98df9a51d5ea 100644 (file)
@@ -54,7 +54,7 @@
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
 
-static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
 {
        uint64_t current_backlight;
        uint32_t round_result;
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
        return (uint32_t)(current_backlight);
 }
 
-static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+static void driver_set_backlight_level(struct dce_abm *abm_dce,
+               uint32_t backlight_pwm_u16_16)
 {
-       uint32_t backlight_24bit;
-       uint32_t backlight_17bit;
        uint32_t backlight_16bit;
        uint32_t masked_pwm_period;
-       uint8_t rounding_bit;
        uint8_t bit_count;
        uint64_t active_duty_cycle;
        uint32_t pwm_period_bitcnt;
 
        /*
-        * 1. Convert 8-bit value to 17 bit U1.16 format
-        * (1 integer, 16 fractional bits)
-        */
-
-       /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
-        * effectively multiplying value by 256/255
-        * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
-        */
-       backlight_24bit = level * 0x10101;
-
-       /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
-        * used for rounding, take most significant bit of fraction for
-        * rounding, e.g. for 0xEFEFEF, rounding bit is 1
-        */
-       rounding_bit = (backlight_24bit >> 7) & 1;
-
-       /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
-        * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
-        */
-       backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
-
-       /*
-        * 2. Find  16 bit backlight active duty cycle, where 0 <= backlight
+        * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
         * active duty cycle <= backlight period
         */
 
-       /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+       /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
         */
        REG_GET_2(BL_PWM_PERIOD_CNTL,
                        BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
        /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
        masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
 
-       /* 2.2 Calculate integer active duty cycle required upper 16 bits
+       /* 1.2 Calculate integer active duty cycle required upper 16 bits
         * contain integer component, lower 16 bits contain fractional component
         * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
         */
-       active_duty_cycle = backlight_17bit * masked_pwm_period;
+       active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
 
-       /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+       /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
         * components shift by bitCount then mask 16 bits and add rounding bit
         * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
         */
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
        backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
 
        /*
-        * 3. Program register with updated value
+        * 2. Program register with updated value
         */
 
-       /* 3.1 Lock group 2 backlight registers */
+       /* 2.1 Lock group 2 backlight registers */
 
        REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
                        BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
                        BL_PWM_GRP1_REG_LOCK, 1);
 
-       // 3.2 Write new active duty cycle
+       // 2.2 Write new active duty cycle
        REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
 
-       /* 3.3 Unlock group 2 backlight registers */
+       /* 2.3 Unlock group 2 backlight registers */
        REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
                        BL_PWM_GRP1_REG_LOCK, 0);
 
-       /* 5.4.4 Wait for pending bit to be cleared */
+       /* 3. Wait for pending bit to be cleared */
        REG_WAIT(BL_PWM_GRP1_REG_LOCK,
                        BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
                        1, 10000);
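
The renumbered steps above boil down to one fixed-point multiply plus a
rounded shift. A self-contained C sketch of steps 1.2/1.3 (a standalone
restatement using the example values from the surviving comments, not the
driver function itself):

#include <stdint.h>

/* Compute the 16-bit active duty cycle from a u16.16 backlight level and a
 * masked PWM period: multiply, shift right by bit_count, then round using
 * the MSB of the discarded fraction.
 */
static uint32_t duty_cycle_16bit(uint32_t backlight_u16_16,
                                 uint32_t masked_pwm_period,
                                 uint8_t bit_count)
{
        uint64_t active = (uint64_t)backlight_u16_16 * masked_pwm_period;
        uint32_t duty = (uint32_t)(active >> bit_count) & 0xFFFF;

        duty += (active >> (bit_count - 1)) & 0x1;
        return duty;
}

/* e.g. duty_cycle_16bit(0xEFF0, 0x24, 6) == 0x86F7, as in the comments. */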
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 
 static void dmcu_set_backlight_level(
        struct dce_abm *abm_dce,
-       uint32_t level,
+       uint32_t backlight_pwm_u16_16,
        uint32_t frame_ramp,
        uint32_t controller_id)
 {
-       unsigned int backlight_16_bit = (level * 0x10101) >> 8;
-       unsigned int backlight_17_bit = backlight_16_bit +
-                               (((backlight_16_bit & 0x80) >> 7) & 1);
+       unsigned int backlight_8_bit = 0;
        uint32_t rampingBoundary = 0xFFFF;
        uint32_t s2;
 
+       if (backlight_pwm_u16_16 & 0x10000)
+               // Check for max backlight condition
+               backlight_8_bit = 0xFF;
+       else
+               // Take MSB of fractional part since backlight is not max
+               backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
        /* set ramping boundary */
        REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
 
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level(
                        0, 1, 80000);
 
        /* setDMCUParam_BL */
-       REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+       REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
 
        /* write ramp */
        if (controller_id == 0)
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level(
        s2 = REG_READ(BIOS_SCRATCH_2);
 
        s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
-       level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+       backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
                                ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
-       s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+       s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
        REG_WRITE(BIOS_SCRATCH_2, s2);
 }
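
For the BIOS scratch write above, the u16.16 value is folded back down to
8 bits. A standalone restatement of that conversion (illustrative, not the
driver code):

#include <stdint.h>

/* Take the top byte of the fractional part of a u16.16 backlight value,
 * saturating to 0xFF when the integer bit (1.0) is set.
 */
static uint8_t pwm_u16_16_to_8bit(uint32_t backlight_pwm_u16_16)
{
        if (backlight_pwm_u16_16 & 0x10000)
                return 0xFF;
        return (backlight_pwm_u16_16 >> 8) & 0xFF;
}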
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level(
 static void dce_abm_init(struct abm *abm)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-       unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+       unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
 
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm)
                        ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
 }
 
-static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+static unsigned int dce_abm_get_current_backlight(struct abm *abm)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
        unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
 
-       return (backlight >> 8);
+       /* return the backlight in hardware format: an unsigned 17-bit value
+        * with 1 integer bit and 16 fractional bits
+        */
+       return backlight;
+}
+
+static unsigned int dce_abm_get_target_backlight(struct abm *abm)
+{
+       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+       unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+       /* return the backlight in hardware format: an unsigned 17-bit value
+        * with 1 integer bit and 16 fractional bits
+        */
+       return backlight;
 }
 
 static bool dce_abm_set_level(struct abm *abm, uint32_t level)
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm)
        return true;
 }
 
-static bool dce_abm_set_backlight_level(
+static bool dce_abm_set_backlight_level_pwm(
                struct abm *abm,
-               unsigned int backlight_level,
+               unsigned int backlight_pwm_u16_16,
                unsigned int frame_ramp,
                unsigned int controller_id,
                bool use_smooth_brightness)
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level(
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
        DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-                       backlight_level, backlight_level);
+                       backlight_pwm_u16_16, backlight_pwm_u16_16);
 
        /* If DMCU is in reset state, DMCU is uninitialized */
        if (use_smooth_brightness)
                dmcu_set_backlight_level(abm_dce,
-                               backlight_level,
+                               backlight_pwm_u16_16,
                                frame_ramp,
                                controller_id);
        else
-               driver_set_backlight_level(abm_dce, backlight_level);
+               driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
 
        return true;
 }
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = {
        .abm_init = dce_abm_init,
        .set_abm_level = dce_abm_set_level,
        .init_backlight = dce_abm_init_backlight,
-       .set_backlight_level = dce_abm_set_backlight_level,
-       .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+       .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
+       .get_current_backlight = dce_abm_get_current_backlight,
+       .get_target_backlight = dce_abm_get_target_backlight,
        .set_abm_immediate_disable = dce_abm_immediate_disable
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
new file mode 100644 (file)
index 0000000..9a28a04
--- /dev/null
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+       container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+       (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+       clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+       clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+       clk_mgr->ctx->logger
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+static int dentist_get_divider_from_did(int did)
+{
+       if (did < DENTIST_BASE_DID_1)
+               did = DENTIST_BASE_DID_1;
+       if (did > DENTIST_MAX_DID)
+               did = DENTIST_MAX_DID;
+
+       if (did < DENTIST_BASE_DID_2) {
+               return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+                                                       * (did - DENTIST_BASE_DID_1);
+       } else if (did < DENTIST_BASE_DID_3) {
+               return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+                                                       * (did - DENTIST_BASE_DID_2);
+       } else if (did < DENTIST_BASE_DID_4) {
+               return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+                                                       * (did - DENTIST_BASE_DID_3);
+       } else {
+               return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+                                                       * (did - DENTIST_BASE_DID_4);
+       }
+}
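+
+/* Worked examples for the mapping above, using the range constants from
+ * dce_clk_mgr.h (dividers are in units of 0.25, i.e. scale factor 4):
+ *   dentist_get_divider_from_did(0x08) ==   8  -> 2.00
+ *   dentist_get_divider_from_did(0x41) ==  66  -> 16.50
+ *   dentist_get_divider_from_did(0x7e) == 248  -> 62.00
+ */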
+
+/* SW will adjust the DP REF clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
+ *
+ * - SS enabled on DP ref clock and HW de-spreading enabled with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on DP ref clock and HW de-spreading enabled with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on DP ref clock and HW de-spreading disabled (should not
+ *   be the case with CIK): SW should program all generated rates
+ *   according to the average value (as with previous ASICs)
+ */
+static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
+{
+       if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+               struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+                               dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+                                                       clk_mgr_dce->dprefclk_ss_divider), 200);
+               struct fixed31_32 adj_dp_ref_clk_khz;
+
+               ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+               adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+               dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+       }
+       return dp_ref_clk_khz;
+}
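+
+/* Numeric example (values assumed, not from the driver): with a VBIOS
+ * downspread entry of 0.3% (dprefclk_ss_percentage = 300, divider = 1000)
+ * the average clock drops by half the spread:
+ *   adj = 600000 * (1 - 300 / (1000 * 200.0)) = 599100 kHz
+ */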
+
+static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       int dprefclk_wdivider;
+       int dprefclk_src_sel;
+       int dp_ref_clk_khz = 600000;
+       int target_div;
+
+       /* ASSERT DP Reference Clock source is from DFS */
+       REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+       ASSERT(dprefclk_src_sel == 0);
+
+       /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+        * programmed DID DENTIST_DPREFCLK_WDIVIDER */
+       REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+       /* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
+       target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+       /* Calculate the current DFS clock, in kHz.*/
+       dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+               * clk_mgr_dce->dentist_vco_freq_khz) / target_div;
+
+       return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
+}
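+
+/* Example: with the default 3.6 GHz DENTIST VCO and an assumed DID of 0x18
+ * (divider 24, i.e. 6.00 after the scale factor of 4) this yields
+ *   dp_ref_clk_khz = 4 * 3600000 / 24 = 600000 kHz
+ * matching the 600000 kHz initial value above.
+ */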
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+
+       return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
+}
+
+/* Unit: kHz. Before mode set, get the pixel clock from the context; the
+ * ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+       uint32_t max_pix_clk = 0;
+       int i;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL)
+                       continue;
+
+               /* do not check the underlay */
+               if (pipe_ctx->top_pipe)
+                       continue;
+
+               if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+                       max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+               /* Raise the clock state for HBR3/2 if required. Confirmed with
+                * HW: DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the
+                * VDDC rail
+                */
+               if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+                               pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+                       max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+       }
+
+       return max_pix_clk;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+       struct clk_mgr *clk_mgr,
+       struct dc_state *context)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       int i;
+       enum dm_pp_clocks_state low_req_clk;
+       int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+       /* Iterate from highest supported to lowest valid state, and update
+        * lowest RequiredState with the lowest state that satisfies
+        * all required clocks
+        */
+       for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+               if (context->bw.dce.dispclk_khz >
+                               clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+                       || max_pix_clk >
+                               clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+                       break;
+
+       low_req_clk = i + 1;
+       if (low_req_clk > clk_mgr_dce->max_clks_state) {
+               /* set max clock state for high phyclock; exceeding the display clock is invalid */
+               if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+                               < context->bw.dce.dispclk_khz)
+                       low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+               else
+                       low_req_clk = clk_mgr_dce->max_clks_state;
+       }
+
+       return low_req_clk;
+}
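+
+/* Example with the dce110 table above and max_clks_state == Performance:
+ * dispclk 500000 kHz / max pixel clock 380000 kHz breaks the loop at
+ * Nominal (500000 > 467000), so low_req_clk = Nominal + 1 = Performance
+ * (assuming the usual low-to-high ordering of enum dm_pp_clocks_state).
+ */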
+
+static int dce_set_clock(
+       struct clk_mgr *clk_mgr,
+       int requested_clk_khz)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+       struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+       int actual_clock = requested_clk_khz;
+       struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+       /* Make sure requested clock isn't lower than minimum threshold */
+       if (requested_clk_khz > 0)
+               requested_clk_khz = max(requested_clk_khz,
+                               clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+       /* Prepare to program display clock */
+       pxl_clk_params.target_pixel_clock = requested_clk_khz;
+       pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+       if (clk_mgr_dce->dfs_bypass_active)
+               pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+       bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+       if (clk_mgr_dce->dfs_bypass_active) {
+               /* Cache the fixed display clock */
+               clk_mgr_dce->dfs_bypass_disp_clk =
+                       pxl_clk_params.dfs_bypass_display_clock;
+               actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+       }
+
+       /* Coming out of power down (HWReset) we need to mark the clock state
+        * as ClocksStateNominal, so that on resume we will call the pplib
+        * voltage regulator. */
+       if (requested_clk_khz == 0)
+               clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+       dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+       return actual_clock;
+}
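+
+/* Example for the threshold above: with the default 3.6 GHz VCO, a request
+ * of 50000 kHz is raised to 3600000 / 64 = 56250 kHz before programming.
+ */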
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct bp_set_dce_clock_parameters dce_clk_params;
+       struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+       struct dc *core_dc = clk_mgr->ctx->dc;
+       struct dmcu *dmcu = core_dc->res_pool->dmcu;
+       int actual_clock = requested_clk_khz;
+       /* Prepare to program display clock */
+       memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+       /* Make sure requested clock isn't lower than minimum threshold */
+       if (requested_clk_khz > 0)
+               requested_clk_khz = max(requested_clk_khz,
+                               clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+       dce_clk_params.target_clock_frequency = requested_clk_khz;
+       dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+       dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+       bp->funcs->set_dce_clock(bp, &dce_clk_params);
+       actual_clock = dce_clk_params.target_clock_frequency;
+
+       /* Coming out of power down (HWReset) we need to mark the clock state
+        * as ClocksStateNominal, so that on resume we will call the pplib
+        * voltage regulator. */
+       if (requested_clk_khz == 0)
+               clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+       /* Program the DP ref clock */
+       /* VBIOS will determine the DPREFCLK frequency, so we don't set it */
+       dce_clk_params.target_clock_frequency = 0;
+       dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+       if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
+               dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+                       (dce_clk_params.pll_id ==
+                                       CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+       else
+               dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+       bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+       if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+               if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+                       dmcu->funcs->set_psr_wait_loop(dmcu,
+                                       actual_clock / 1000 / 7);
+       }
+
+       clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+       return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+       struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+       struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+       struct integrated_info info = { { { 0 } } };
+       struct dc_firmware_info fw_info = { { 0 } };
+       int i;
+
+       if (bp->integrated_info)
+               info = *bp->integrated_info;
+
+       clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+       if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+               bp->funcs->get_firmware_info(bp, &fw_info);
+               clk_mgr_dce->dentist_vco_freq_khz =
+                       fw_info.smu_gpu_pll_output_freq;
+               if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+                       clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+       }
+
+       /* update the maximum display clock for each power state */
+       for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+               enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+               switch (i) {
+               case 0:
+                       clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+                       break;
+
+               case 1:
+                       clk_state = DM_PP_CLOCKS_STATE_LOW;
+                       break;
+
+               case 2:
+                       clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+                       break;
+
+               case 3:
+                       clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+                       break;
+
+               default:
+                       clk_state = DM_PP_CLOCKS_STATE_INVALID;
+                       break;
+               }
+
+               /* Do not allow a bad VBIOS/SBIOS to override with invalid
+                * values; check for > 100 MHz */
+               if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+                       clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+                               info.disp_clk_voltage[i].max_supported_clk;
+       }
+
+       if (!debug->disable_dfs_bypass && bp->integrated_info)
+               if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+                       clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+       struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+       int ss_info_num = bp->funcs->get_ss_entry_number(
+                       bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+       if (ss_info_num) {
+               struct spread_spectrum_info info = { { 0 } };
+               enum bp_result result = bp->funcs->get_spread_spectrum_info(
+                               bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+               /* The VBIOS keeps an entry for GPU PLL SS even if SS is not
+                * enabled; in that case SSInfo.spreadSpectrumPercentage != 0
+                * is the sign that SS is enabled.
+                */
+               if (result == BP_RESULT_OK &&
+                               info.spread_spectrum_percentage != 0) {
+                       clk_mgr_dce->ss_on_dprefclk = true;
+                       clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+                       if (info.type.CENTER_MODE == 0) {
+                               /* TODO: Currently for DP Reference clock we
+                                * need only SS percentage for
+                                * downspread */
+                               clk_mgr_dce->dprefclk_ss_percentage =
+                                               info.spread_spectrum_percentage;
+                       }
+
+                       return;
+               }
+
+               result = bp->funcs->get_spread_spectrum_info(
+                               bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+               /* The VBIOS keeps an entry for DPREFCLK SS even if SS is not
+                * enabled; in that case SSInfo.spreadSpectrumPercentage != 0
+                * is the sign that SS is enabled.
+                */
+               if (result == BP_RESULT_OK &&
+                               info.spread_spectrum_percentage != 0) {
+                       clk_mgr_dce->ss_on_dprefclk = true;
+                       clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+                       if (info.type.CENTER_MODE == 0) {
+                               /* Currently for DP Reference clock we
+                                * need only SS percentage for
+                                * downspread */
+                               clk_mgr_dce->dprefclk_ss_percentage =
+                                               info.spread_spectrum_percentage;
+                       }
+               }
+       }
+}
+
+void dce110_fill_display_configs(
+       const struct dc_state *context,
+       struct dm_pp_display_configuration *pp_display_cfg)
+{
+       int j;
+       int num_cfgs = 0;
+
+       for (j = 0; j < context->stream_count; j++) {
+               int k;
+
+               const struct dc_stream_state *stream = context->streams[j];
+               struct dm_pp_single_disp_config *cfg =
+                       &pp_display_cfg->disp_configs[num_cfgs];
+               const struct pipe_ctx *pipe_ctx = NULL;
+
+               for (k = 0; k < MAX_PIPES; k++)
+                       if (stream == context->res_ctx.pipe_ctx[k].stream) {
+                               pipe_ctx = &context->res_ctx.pipe_ctx[k];
+                               break;
+                       }
+
+               ASSERT(pipe_ctx != NULL);
+
+               /* only notify active stream */
+               if (stream->dpms_off)
+                       continue;
+
+               num_cfgs++;
+               cfg->signal = pipe_ctx->stream->signal;
+               cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+               cfg->src_height = stream->src.height;
+               cfg->src_width = stream->src.width;
+               cfg->ddi_channel_mapping =
+                       stream->sink->link->ddi_channel_mapping.raw;
+               cfg->transmitter =
+                       stream->sink->link->link_enc->transmitter;
+               cfg->link_settings.lane_count =
+                       stream->sink->link->cur_link_settings.lane_count;
+               cfg->link_settings.link_rate =
+                       stream->sink->link->cur_link_settings.link_rate;
+               cfg->link_settings.link_spread =
+                       stream->sink->link->cur_link_settings.link_spread;
+               cfg->sym_clock = stream->phy_pix_clk;
+               /* Round v_refresh */
+               cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+               cfg->v_refresh /= stream->timing.h_total;
+               cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+                                                       / stream->timing.v_total;
+       }
+
+       pp_display_cfg->display_count = num_cfgs;
+}
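+
+/* Rounding example for the v_refresh math above (standard 1080p timing,
+ * values assumed): pix_clk 148500 kHz, h_total 2200, v_total 1125:
+ *   v_refresh = 148500000 / 2200 = 67500
+ *   v_refresh = (67500 + 1125 / 2) / 1125 = 60 Hz
+ */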
+
+static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+       uint8_t j;
+       uint32_t min_vertical_blank_time = -1;
+
+       for (j = 0; j < context->stream_count; j++) {
+               struct dc_stream_state *stream = context->streams[j];
+               uint32_t vertical_blank_in_pixels = 0;
+               uint32_t vertical_blank_time = 0;
+
+               vertical_blank_in_pixels = stream->timing.h_total *
+                       (stream->timing.v_total
+                        - stream->timing.v_addressable);
+
+               vertical_blank_time = vertical_blank_in_pixels
+                       * 1000 / stream->timing.pix_clk_khz;
+
+               if (min_vertical_blank_time > vertical_blank_time)
+                       min_vertical_blank_time = vertical_blank_time;
+       }
+
+       return min_vertical_blank_time;
+}
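+
+/* With the same assumed 1080p timing: 2200 * (1125 - 1080) = 99000 blanking
+ * pixels, so the minimum vblank time is 99000 * 1000 / 148500 = 666 us.
+ */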
+
+static int determine_sclk_from_bounding_box(
+               const struct dc *dc,
+               int required_sclk)
+{
+       int i;
+
+       /*
+        * Some asics do not give us sclk levels, so we just report the actual
+        * required sclk
+        */
+       if (dc->sclk_lvls.num_levels == 0)
+               return required_sclk;
+
+       for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+               if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+                       return dc->sclk_lvls.clocks_in_khz[i];
+       }
+       /*
+        * Even the maximum level could not satisfy the requirement; this
+        * is unexpected at this stage and should have been caught at
+        * validation time.
+        */
+       ASSERT(0);
+       return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
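+
+/* e.g. with levels {300000, 600000, 800000} kHz a required sclk of 450000
+ * is bumped to 600000; a request above 800000 trips the ASSERT and clamps
+ * to the top level. (Hypothetical level table, for illustration only.)
+ */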
+
+static void dce_pplib_apply_display_requirements(
+       struct dc *dc,
+       struct dc_state *context)
+{
+       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+       pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+       dce110_fill_display_configs(context, pp_display_cfg);
+
+       if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+               dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_pplib_apply_display_requirements(
+       struct dc *dc,
+       struct dc_state *context)
+{
+       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+       pp_display_cfg->all_displays_in_sync =
+               context->bw.dce.all_displays_in_sync;
+       pp_display_cfg->nb_pstate_switch_disable =
+                       context->bw.dce.nbp_state_change_enable == false;
+       pp_display_cfg->cpu_cc6_disable =
+                       context->bw.dce.cpuc_state_change_enable == false;
+       pp_display_cfg->cpu_pstate_disable =
+                       context->bw.dce.cpup_state_change_enable == false;
+       pp_display_cfg->cpu_pstate_separation_time =
+                       context->bw.dce.blackout_recovery_time_us;
+
+       pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+               / MEMORY_TYPE_MULTIPLIER_CZ;
+
+       pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+                       dc,
+                       context->bw.dce.sclk_khz);
+
+       pp_display_cfg->min_engine_clock_deep_sleep_khz
+                       = context->bw.dce.sclk_deep_sleep_khz;
+
+       pp_display_cfg->avail_mclk_switch_time_us =
+                                               dce110_get_min_vblank_time_us(context);
+       /* TODO: dce11.2 */
+       pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+       pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+
+       dce110_fill_display_configs(context, pp_display_cfg);
+
+       /* TODO: is this still applicable? */
+       if (pp_display_cfg->display_count == 1) {
+               const struct dc_crtc_timing *timing =
+                       &context->streams[0]->timing;
+
+               pp_display_cfg->crtc_index =
+                       pp_display_cfg->disp_configs[0].pipe_idx;
+               pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+       }
+
+       if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+               dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
+                       bool safe_to_lower)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct dm_pp_power_level_change_request level_change_req;
+       int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+       /* TODO: workaround for dal3 Linux; investigate why this works */
+       if (!clk_mgr_dce->dfs_bypass_active)
+               context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+       level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+       /* get max clock state from PPLIB */
+       if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+                       || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+               if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+                       clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+       }
+
+       if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+               context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+               clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+       }
+       dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+       context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
+                       bool safe_to_lower)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct dm_pp_power_level_change_request level_change_req;
+
+       level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+       /* get max clock state from PPLIB */
+       if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+                       || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+               if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+                       clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+       }
+
+       if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+               context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+               clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+       }
+       dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
+                       bool safe_to_lower)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct dm_pp_power_level_change_request level_change_req;
+
+       level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+       /* get max clock state from PPLIB */
+       if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+                       || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+               if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+                       clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+       }
+
+       if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+               context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+               clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+       }
+       dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
+                       bool safe_to_lower)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+       struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+       int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+       int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+       /* TODO: workaround for dal3 Linux; investigate why this works */
+       if (!clk_mgr_dce->dfs_bypass_active)
+               context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+       if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+               clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+               context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+               clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+
+               dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+       }
+
+       if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+               clock_voltage_req.clocks_in_khz = max_pix_clk;
+               clk_mgr->clks.phyclk_khz = max_pix_clk;
+
+               dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+       }
+       dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+       context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static const struct clk_mgr_funcs dce120_funcs = {
+       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .update_clocks = dce12_update_clocks
+};
+
+static const struct clk_mgr_funcs dce112_funcs = {
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .update_clocks = dce112_update_clocks
+};
+
+static const struct clk_mgr_funcs dce110_funcs = {
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .update_clocks = dce11_update_clocks,
+};
+
+static const struct clk_mgr_funcs dce_funcs = {
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .update_clocks = dce_update_clocks
+};
+
+static void dce_clk_mgr_construct(
+       struct dce_clk_mgr *clk_mgr_dce,
+       struct dc_context *ctx,
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask)
+{
+       struct clk_mgr *base = &clk_mgr_dce->base;
+       struct dm_pp_static_clock_info static_clk_info = {0};
+
+       base->ctx = ctx;
+       base->funcs = &dce_funcs;
+
+       clk_mgr_dce->regs = regs;
+       clk_mgr_dce->clk_mgr_shift = clk_shift;
+       clk_mgr_dce->clk_mgr_mask = clk_mask;
+
+       clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+       clk_mgr_dce->dprefclk_ss_percentage = 0;
+       clk_mgr_dce->dprefclk_ss_divider = 1000;
+       clk_mgr_dce->ss_on_dprefclk = false;
+
+       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+               clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
+       else
+               clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+       clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+       dce_clock_read_integrated_info(clk_mgr_dce);
+       dce_clock_read_ss_info(clk_mgr_dce);
+}
+
+struct clk_mgr *dce_clk_mgr_create(
+       struct dc_context *ctx,
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask)
+{
+       struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+       if (clk_mgr_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       memcpy(clk_mgr_dce->max_clks_by_state,
+               dce80_max_clks_by_state,
+               sizeof(dce80_max_clks_by_state));
+
+       dce_clk_mgr_construct(
+               clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+       return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce110_clk_mgr_create(
+       struct dc_context *ctx,
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask)
+{
+       struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+       if (clk_mgr_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       memcpy(clk_mgr_dce->max_clks_by_state,
+               dce110_max_clks_by_state,
+               sizeof(dce110_max_clks_by_state));
+
+       dce_clk_mgr_construct(
+               clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+       clk_mgr_dce->base.funcs = &dce110_funcs;
+
+       return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce112_clk_mgr_create(
+       struct dc_context *ctx,
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask)
+{
+       struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+       if (clk_mgr_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       memcpy(clk_mgr_dce->max_clks_by_state,
+               dce112_max_clks_by_state,
+               sizeof(dce112_max_clks_by_state));
+
+       dce_clk_mgr_construct(
+               clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+       clk_mgr_dce->base.funcs = &dce112_funcs;
+
+       return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
+{
+       struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+       if (clk_mgr_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       memcpy(clk_mgr_dce->max_clks_by_state,
+               dce120_max_clks_by_state,
+               sizeof(dce120_max_clks_by_state));
+
+       dce_clk_mgr_construct(
+               clk_mgr_dce, ctx, NULL, NULL, NULL);
+
+       clk_mgr_dce->dprefclk_khz = 600000;
+       clk_mgr_dce->base.funcs = &dce120_funcs;
+
+       return &clk_mgr_dce->base;
+}
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
+{
+       struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
+
+       kfree(clk_mgr_dce);
+       *clk_mgr = NULL;
+}
similarity index 55%
rename from drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
rename to drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 34fdb386c884855dc5e0edd4b98653eb2e0bde54..046077797416a6c9b9ba7bff24c5bc586244bc59 100644 (file)
  */
 
 
-#ifndef _DCE_CLOCKS_H_
-#define _DCE_CLOCKS_H_
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
 
-#include "display_clock.h"
+#include "clk_mgr.h"
+#include "dccg.h"
+
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
 
 #define CLK_COMMON_REG_LIST_DCE_BASE() \
        .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
        type DENTIST_DISPCLK_WDIVIDER; \
        type DENTIST_DISPCLK_CHG_DONE;
 
-struct dccg_shift {
+struct clk_mgr_shift {
        CLK_REG_FIELD_LIST(uint8_t)
 };
 
-struct dccg_mask {
+struct clk_mgr_mask {
        CLK_REG_FIELD_LIST(uint32_t)
 };
 
-struct dccg_registers {
+struct clk_mgr_registers {
        uint32_t DPREFCLK_CNTL;
        uint32_t DENTIST_DISPCLK_CNTL;
 };
 
-struct dce_dccg {
-       struct dccg base;
-       const struct dccg_registers *regs;
-       const struct dccg_shift *clk_shift;
-       const struct dccg_mask *clk_mask;
+struct state_dependent_clocks {
+       int display_clk_khz;
+       int pixel_clk_khz;
+};
+
+struct dce_clk_mgr {
+       struct clk_mgr base;
+       const struct clk_mgr_registers *regs;
+       const struct clk_mgr_shift *clk_mgr_shift;
+       const struct clk_mgr_mask *clk_mgr_mask;
+
+       struct dccg *dccg;
 
        struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
 
@@ -91,33 +101,68 @@ struct dce_dccg {
        /* DPREFCLK SS percentage Divider (100 or 1000) */
        int dprefclk_ss_divider;
        int dprefclk_khz;
+
+       enum dm_pp_clocks_state max_clks_state;
+       enum dm_pp_clocks_state cur_min_clks_state;
 };
 
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+       DENTIST_BASE_DID_1 = 0x08,
+       DENTIST_BASE_DID_2 = 0x40,
+       DENTIST_BASE_DID_3 = 0x60,
+       DENTIST_BASE_DID_4 = 0x7e,
+       DENTIST_MAX_DID = 0x7f
+};
 
-struct dccg *dce_dccg_create(
-       struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask);
+/* Starting point and step size for each divider range. */
+enum dentist_divider_range {
+       DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
+       DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
+       DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
+       DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
+       DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+       DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
+       DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+       DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
+       DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
+
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+       return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
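+
+/* Truth-table examples for the helper above:
+ *   should_set_clock(false, 300000, 400000) == false  (defer lowering)
+ *   should_set_clock(true,  300000, 400000) == true   (safe to lower)
+ *   should_set_clock(false, 500000, 400000) == true   (raising is always ok)
+ */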
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
 
-struct dccg *dce110_dccg_create(
+void dce110_fill_display_configs(
+       const struct dc_state *context,
+       struct dm_pp_display_configuration *pp_display_cfg);
+
+int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
+
+struct clk_mgr *dce_clk_mgr_create(
        struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask);
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask);
 
-struct dccg *dce112_dccg_create(
+struct clk_mgr *dce110_clk_mgr_create(
        struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask);
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask);
 
-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+struct clk_mgr *dce112_clk_mgr_create(
+       struct dc_context *ctx,
+       const struct clk_mgr_registers *regs,
+       const struct clk_mgr_shift *clk_shift,
+       const struct clk_mgr_mask *clk_mask);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
-#endif
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
 
-void dce_dccg_destroy(struct dccg **dccg);
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
 
-#endif /* _DCE_CLOCKS_H_ */
+#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
deleted file mode 100644 (file)
index d89a097..0000000
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce_clocks.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-#include "bios_parser_interface.h"
-#include "dc.h"
-#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn_calcs.h"
-#endif
-#include "core_types.h"
-#include "dc_types.h"
-#include "dal_asic_id.h"
-
-#define TO_DCE_CLOCKS(clocks)\
-       container_of(clocks, struct dce_dccg, base)
-
-#define REG(reg) \
-       (clk_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
-       clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
-
-#define CTX \
-       clk_dce->base.ctx
-#define DC_LOGGER \
-       clk->ctx->logger
-
-/* Max clock values for each state indexed by "enum clocks_state": */
-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
-/* ClocksStateInvalid - should not be used */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateLow */
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
-/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
-/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
-
-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-
-/* Starting DID for each range */
-enum dentist_base_divider_id {
-       DENTIST_BASE_DID_1 = 0x08,
-       DENTIST_BASE_DID_2 = 0x40,
-       DENTIST_BASE_DID_3 = 0x60,
-       DENTIST_BASE_DID_4 = 0x7e,
-       DENTIST_MAX_DID = 0x7f
-};
-
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
-       DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
-       DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
-       DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
-       DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
-       DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
-       DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
-       DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
-       DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
-       DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
-
-static int dentist_get_divider_from_did(int did)
-{
-       if (did < DENTIST_BASE_DID_1)
-               did = DENTIST_BASE_DID_1;
-       if (did > DENTIST_MAX_DID)
-               did = DENTIST_MAX_DID;
-
-       if (did < DENTIST_BASE_DID_2) {
-               return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
-                                                       * (did - DENTIST_BASE_DID_1);
-       } else if (did < DENTIST_BASE_DID_3) {
-               return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
-                                                       * (did - DENTIST_BASE_DID_2);
-       } else if (did < DENTIST_BASE_DID_4) {
-               return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
-                                                       * (did - DENTIST_BASE_DID_3);
-       } else {
-               return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
-                                                       * (did - DENTIST_BASE_DID_4);
-       }
-}
-
-/* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
-  */
-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
-{
-       if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-               struct fixed31_32 ss_percentage = dc_fixpt_div_int(
-                               dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
-                                                       clk_dce->dprefclk_ss_divider), 200);
-               struct fixed31_32 adj_dp_ref_clk_khz;
-
-               ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
-               adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
-               dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
-       }
-       return dp_ref_clk_khz;
-}
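
A worked example of the adjustment above, assuming the default divider of 1000 set in dce_dccg_construct() and a hypothetical downspread stored as 500 (i.e. 0.5%): with downspread, the average clock drops by half the spread percentage.

#include <stdio.h>

int main(void)
{
        int dp_ref_clk_khz = 600000;
        int ss_percentage = 500;   /* hypothetical raw VBIOS value, 0.5% */
        int ss_divider = 1000;     /* default from dce_dccg_construct() */

        /* pct / 200: half of the spread percentage, as a fraction */
        double pct = (double)ss_percentage / ss_divider;        /* 0.5 */
        int adj = (int)(dp_ref_clk_khz * (1.0 - pct / 200.0));  /* 598500 */

        printf("adjusted DP ref clock: %d kHz\n", adj);
        return 0;
}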
-
-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-       int dprefclk_wdivider;
-       int dprefclk_src_sel;
-       int dp_ref_clk_khz = 600000;
-       int target_div;
-
-       /* ASSERT DP Reference Clock source is from DFS*/
-       REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
-       ASSERT(dprefclk_src_sel == 0);
-
-       /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
-        * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
-       REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
-       /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
-       target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
-       /* Calculate the current DFS clock, in kHz.*/
-       dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-               * clk_dce->dentist_vco_freq_khz) / target_div;
-
-       return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
-}
-
-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-
-       return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
-}
-
-static enum dm_pp_clocks_state dce_get_required_clocks_state(
-       struct dccg *clk,
-       struct dc_clocks *req_clocks)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-       int i;
-       enum dm_pp_clocks_state low_req_clk;
-
-       /* Iterate from highest supported to lowest valid state, and update
-        * lowest RequiredState with the lowest state that satisfies
-        * all required clocks
-        */
-       for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
-               if (req_clocks->dispclk_khz >
-                               clk_dce->max_clks_by_state[i].display_clk_khz
-                       || req_clocks->phyclk_khz >
-                               clk_dce->max_clks_by_state[i].pixel_clk_khz)
-                       break;
-
-       low_req_clk = i + 1;
-       if (low_req_clk > clk->max_clks_state) {
-               /* The request exceeds the highest state: clamp to
-                * max_clks_state unless the display clock itself is out of
-                * range, which is invalid. */
-               if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
-                               < req_clocks->dispclk_khz)
-                       low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
-               else
-                       low_req_clk = clk->max_clks_state;
-       }
-
-       return low_req_clk;
-}
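
Tracing the scan above against the dce120 table (assuming the state enum runs INVALID=0 through PERFORMANCE=4): a request of dispclk 500000 / phyclk 270000 kHz fits PERFORMANCE and NOMINAL but exceeds LOW's 460000, so the loop breaks at LOW and returns the next state up. A standalone sketch:

#include <stdio.h>

/* dce120 caps from the table above: { display_clk_khz, pixel_clk_khz } */
static const int caps[5][2] = {
        { 0, 0 },                 /* INVALID */
        { 0, 0 },                 /* ULTRA_LOW */
        { 460000, 400000 },       /* LOW */
        { 670000, 600000 },       /* NOMINAL */
        { 1133000, 600000 },      /* PERFORMANCE */
};

int main(void)
{
        int dispclk = 500000, phyclk = 270000, i;

        for (i = 4; i >= 1; i--)  /* max state down to ULTRA_LOW */
                if (dispclk > caps[i][0] || phyclk > caps[i][1])
                        break;

        printf("lowest satisfying state: %d\n", i + 1);  /* 3 == NOMINAL */
        return 0;
}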
-
-static int dce_set_clock(
-       struct dccg *clk,
-       int requested_clk_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-       struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
-       struct dc_bios *bp = clk->ctx->dc_bios;
-       int actual_clock = requested_clk_khz;
-
-       /* Make sure requested clock isn't lower than minimum threshold*/
-       if (requested_clk_khz > 0)
-               requested_clk_khz = max(requested_clk_khz,
-                               clk_dce->dentist_vco_freq_khz / 64);
-
-       /* Prepare to program display clock*/
-       pxl_clk_params.target_pixel_clock = requested_clk_khz;
-       pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-
-       if (clk_dce->dfs_bypass_active)
-               pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
-
-       bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
-
-       if (clk_dce->dfs_bypass_active) {
-               /* Cache the fixed display clock*/
-               clk_dce->dfs_bypass_disp_clk =
-                       pxl_clk_params.dfs_bypass_display_clock;
-               actual_clock = pxl_clk_params.dfs_bypass_display_clock;
-       }
-
-       /* Coming out of power down (HW reset), mark the clock state as
-        * ClocksStateNominal so that on resume we call the PPLib voltage
-        * regulator. */
-       if (requested_clk_khz == 0)
-               clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-       return actual_clock;
-}
-
-static int dce_psr_set_clock(
-       struct dccg *clk,
-       int requested_clk_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-       struct dc_context *ctx = clk_dce->base.ctx;
-       struct dc *core_dc = ctx->dc;
-       struct dmcu *dmcu = core_dc->res_pool->dmcu;
-       int actual_clk_khz = requested_clk_khz;
-
-       actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
-
-       dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
-       return actual_clk_khz;
-}
-
-static int dce112_set_clock(
-       struct dccg *clk,
-       int requested_clk_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-       struct bp_set_dce_clock_parameters dce_clk_params;
-       struct dc_bios *bp = clk->ctx->dc_bios;
-       struct dc *core_dc = clk->ctx->dc;
-       struct dmcu *dmcu = core_dc->res_pool->dmcu;
-       int actual_clock = requested_clk_khz;
-       /* Prepare to program display clock*/
-       memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-
-       /* Make sure requested clock isn't lower than minimum threshold*/
-       if (requested_clk_khz > 0)
-               requested_clk_khz = max(requested_clk_khz,
-                               clk_dce->dentist_vco_freq_khz / 62);
-
-       dce_clk_params.target_clock_frequency = requested_clk_khz;
-       dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-       dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-
-       bp->funcs->set_dce_clock(bp, &dce_clk_params);
-       actual_clock = dce_clk_params.target_clock_frequency;
-
-       /* Coming out of power down (HW reset), mark the clock state as
-        * ClocksStateNominal so that on resume we call the PPLib voltage
-        * regulator. */
-       if (requested_clk_khz == 0)
-               clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-
-       /*Program DP ref Clock*/
-       /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
-       dce_clk_params.target_clock_frequency = 0;
-       dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
-       if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
-               dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
-                       (dce_clk_params.pll_id ==
-                                       CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
-       else
-               dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-
-       bp->funcs->set_dce_clock(bp, &dce_clk_params);
-
-       if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
-               if (clk_dce->dfs_bypass_disp_clk != actual_clock)
-                       dmcu->funcs->set_psr_wait_loop(dmcu,
-                                       actual_clock / 1000 / 7);
-       }
-
-       clk_dce->dfs_bypass_disp_clk = actual_clock;
-       return actual_clock;
-}
-
-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
-{
-       struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
-       struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-       struct integrated_info info = { { { 0 } } };
-       struct dc_firmware_info fw_info = { { 0 } };
-       int i;
-
-       if (bp->integrated_info)
-               info = *bp->integrated_info;
-
-       clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
-       if (clk_dce->dentist_vco_freq_khz == 0) {
-               bp->funcs->get_firmware_info(bp, &fw_info);
-               clk_dce->dentist_vco_freq_khz =
-                       fw_info.smu_gpu_pll_output_freq;
-               if (clk_dce->dentist_vco_freq_khz == 0)
-                       clk_dce->dentist_vco_freq_khz = 3600000;
-       }
-
-       /*update the maximum display clock for each power state*/
-       for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
-               enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
-
-               switch (i) {
-               case 0:
-                       clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
-                       break;
-
-               case 1:
-                       clk_state = DM_PP_CLOCKS_STATE_LOW;
-                       break;
-
-               case 2:
-                       clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
-                       break;
-
-               case 3:
-                       clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
-                       break;
-
-               default:
-                       clk_state = DM_PP_CLOCKS_STATE_INVALID;
-                       break;
-               }
-
-               /*Do not allow bad VBIOS/SBIOS to override with invalid values,
-                * check for > 100MHz*/
-               if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
-                       clk_dce->max_clks_by_state[clk_state].display_clk_khz =
-                               info.disp_clk_voltage[i].max_supported_clk;
-       }
-
-       if (!debug->disable_dfs_bypass && bp->integrated_info)
-               if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-                       clk_dce->dfs_bypass_enabled = true;
-}
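
The switch inside the loop above is a fixed index-to-state map; assuming NUMBER_OF_DISP_CLK_VOLTAGE is 4, it is equivalent to a small lookup table:

/* Table-driven form of the switch above (sketch; assumes
 * NUMBER_OF_DISP_CLK_VOLTAGE == 4, with anything else INVALID). */
static const enum dm_pp_clocks_state row_to_state[] = {
        DM_PP_CLOCKS_STATE_ULTRA_LOW,    /* disp_clk_voltage[0] */
        DM_PP_CLOCKS_STATE_LOW,          /* disp_clk_voltage[1] */
        DM_PP_CLOCKS_STATE_NOMINAL,      /* disp_clk_voltage[2] */
        DM_PP_CLOCKS_STATE_PERFORMANCE,  /* disp_clk_voltage[3] */
};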
-
-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
-{
-       struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-       int ss_info_num = bp->funcs->get_ss_entry_number(
-                       bp, AS_SIGNAL_TYPE_GPU_PLL);
-
-       if (ss_info_num) {
-               struct spread_spectrum_info info = { { 0 } };
-               enum bp_result result = bp->funcs->get_spread_spectrum_info(
-                               bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
-
-               /* The VBIOS keeps an entry for GPU PLL SS even when SS is
-                * not enabled; a non-zero SSInfo.spreadSpectrumPercentage
-                * is the sign that SS is actually enabled.
-                */
-               if (result == BP_RESULT_OK &&
-                               info.spread_spectrum_percentage != 0) {
-                       clk_dce->ss_on_dprefclk = true;
-                       clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-                       if (info.type.CENTER_MODE == 0) {
-                               /* TODO: Currently for DP Reference clock we
-                                * need only SS percentage for
-                                * downspread */
-                               clk_dce->dprefclk_ss_percentage =
-                                               info.spread_spectrum_percentage;
-                       }
-
-                       return;
-               }
-
-               result = bp->funcs->get_spread_spectrum_info(
-                               bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
-
-               /* The VBIOS keeps an entry for DPREFCLK SS even when SS is
-                * not enabled; a non-zero SSInfo.spreadSpectrumPercentage
-                * is the sign that SS is actually enabled.
-                */
-               if (result == BP_RESULT_OK &&
-                               info.spread_spectrum_percentage != 0) {
-                       clk_dce->ss_on_dprefclk = true;
-                       clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-                       if (info.type.CENTER_MODE == 0) {
-                               /* Currently for DP Reference clock we
-                                * need only SS percentage for
-                                * downspread */
-                               clk_dce->dprefclk_ss_percentage =
-                                               info.spread_spectrum_percentage;
-                       }
-               }
-       }
-}
-
-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
-{
-       return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
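
The helper above raises clocks eagerly but lowers them only when the caller says it is safe; a standalone sketch of the four cases:

#include <stdio.h>
#include <stdbool.h>

static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
        return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
        printf("%d\n", should_set_clock(false, 300, 400));  /* 0: defer lowering */
        printf("%d\n", should_set_clock(true, 300, 400));   /* 1: lower now */
        printf("%d\n", should_set_clock(false, 500, 400));  /* 1: always raise */
        printf("%d\n", should_set_clock(true, 400, 400));   /* 0: no reprogram */
        return 0;
}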
-
-static void dce12_update_clocks(struct dccg *dccg,
-                       struct dc_clocks *new_clocks,
-                       bool safe_to_lower)
-{
-       struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-
-       /* TODO: Investigate why this is needed to fix display corruption. */
-       new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
-               clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
-               new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-       }
-
-       if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
-               clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
-               dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-       }
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-       bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
-       int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
-       bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
-
-       /* Increasing the clock: the case of interest is current divider off,
-        * requested divider on. */
-       if (dispclk_increase) {
-               /* already divided by 2, no need to reach target clk with 2 steps*/
-               if (cur_dpp_div)
-                       return new_clocks->dispclk_khz;
-
-               /* the requested disp clk is lower than the maximum supported
-                * dpp clk, so there is no need to reach the target clk in
-                * two steps.
-                */
-               if (new_clocks->dispclk_khz <= disp_clk_threshold)
-                       return new_clocks->dispclk_khz;
-
-               /* the request does not divide dppclk by 2, so no intermediate step is needed */
-               if (!request_dpp_div)
-                       return new_clocks->dispclk_khz;
-
-       } else {
-               /* Decreasing the clock: the case of interest is current
-                * dppclk divided by 2 and requested dppclk not divided by 2.
-                */
-
-               /* current dpp clk not divided by 2, no need to ramp*/
-               if (!cur_dpp_div)
-                       return new_clocks->dispclk_khz;
-
-               /* current disp clk is lower than current maximum dpp clk,
-                * no need to ramp
-                */
-               if (dccg->clks.dispclk_khz <= disp_clk_threshold)
-                       return new_clocks->dispclk_khz;
-
-               /* the requested dpp clk needs to be divided by 2 */
-               if (request_dpp_div)
-                       return new_clocks->dispclk_khz;
-       }
-
-       return disp_clk_threshold;
-}
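
A worked trace with hypothetical values: current dispclk 400000 kHz with the DPP divider off, requested dispclk 900000 kHz with the divider on, max supported dppclk 600000 kHz. The function returns the 600000 threshold, and dcn1_ramp_up_dispclk_with_dpp() below then programs 600000, flips each active pipe to divide-by-2, and finally programs the full 900000. A condensed sketch of the increasing-clock path only:

#include <stdio.h>
#include <stdbool.h>

static int threshold(int cur_disp, int cur_dpp,
                     int new_disp, int new_dpp, int max_dpp)
{
        bool request_div = new_disp > new_dpp;
        bool cur_div = cur_disp > cur_dpp;

        if (cur_div || new_disp <= max_dpp || !request_div)
                return new_disp;       /* no intermediate step needed */
        return max_dpp;                /* ramp to the threshold first */
}

int main(void)
{
        printf("%d\n", threshold(400000, 400000, 900000, 450000, 600000));
        return 0;                      /* prints 600000 */
}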
-
-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-       struct dc *dc = dccg->ctx->dc;
-       int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
-       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-       int i;
-
-       /* set disp clk to dpp clk threshold */
-       dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
-
-       /* update request dpp clk division option */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-               if (!pipe_ctx->plane_state)
-                       continue;
-
-               pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
-                               pipe_ctx->plane_res.dpp,
-                               request_dpp_div,
-                               true);
-       }
-
-       /* If target clk not same as dppclk threshold, set to target clock */
-       if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
-               dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-
-       dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-       dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
-       dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static void dcn1_update_clocks(struct dccg *dccg,
-                       struct dc_clocks *new_clocks,
-                       bool safe_to_lower)
-{
-       struct dc *dc = dccg->ctx->dc;
-       struct pp_smu_display_requirement_rv *smu_req_cur =
-                       &dc->res_pool->pp_smu_req;
-       struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
-       struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-       struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-       bool send_request_to_increase = false;
-       bool send_request_to_lower = false;
-
-       if (new_clocks->phyclk_khz)
-               smu_req.display_count = 1;
-       else
-               smu_req.display_count = 0;
-
-       if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
-                       || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
-                       || new_clocks->fclk_khz > dccg->clks.fclk_khz
-                       || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
-               send_request_to_increase = true;
-
-       if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-               dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-               send_request_to_lower = true;
-       }
-
-       if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
-               dccg->clks.fclk_khz = new_clocks->fclk_khz;
-               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-               clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
-               smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
-
-               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-               send_request_to_lower = true;
-       }
-
-       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
-               dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
-               smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
-
-               send_request_to_lower = true;
-       }
-
-       if (should_set_clock(safe_to_lower,
-                       new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
-               dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
-               smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
-
-               send_request_to_lower = true;
-       }
-
-       /* Raise dcfclk before dppclk so that there is enough
-        * voltage to run the dpp clock.
-        */
-       if (send_request_to_increase) {
-               /*use dcfclk to request voltage*/
-               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-               if (pp_smu->set_display_requirement)
-                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-       }
-
-       /* dcn1 dppclk is tied to dispclk. Program dispclk even when it is
-        * unchanged (==) as a workaround for sleep/resume clock ramping
-        * issues. */
-       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
-                       || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
-               dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
-               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-               send_request_to_lower = true;
-       }
-
-       if (!send_request_to_increase && send_request_to_lower) {
-               /*use dcfclk to request voltage*/
-               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-               if (pp_smu->set_display_requirement)
-                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-       }
-
-
-       *smu_req_cur = smu_req;
-}
-#endif
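
The voltage-request ordering in dcn1_update_clocks() above is deliberate: when any clock rises, the DCFCLK-based voltage request is sent before the clocks are programmed; when clocks only fall, it is sent after. Schematically (sketch with hypothetical stand-in helpers):

#include <stdbool.h>

/* Hypothetical stand-ins for the PPLib/SMU calls used above. */
static void request_voltage(void) { }
static void program_clocks(void) { }

static void update_clocks_sketch(bool going_up, bool going_down)
{
        if (going_up)
                request_voltage();   /* raise voltage before raising clocks */

        program_clocks();            /* each clock gated by should_set_clock() */

        if (!going_up && going_down)
                request_voltage();   /* lower voltage only afterwards */
}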
-
-static void dce_update_clocks(struct dccg *dccg,
-                       struct dc_clocks *new_clocks,
-                       bool safe_to_lower)
-{
-       struct dm_pp_power_level_change_request level_change_req;
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-
-       /* TODO: Investigate why this is needed to fix display corruption. */
-       if (!clk_dce->dfs_bypass_active)
-               new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-       level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
-       /* get max clock state from PPLIB */
-       if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
-                       || level_change_req.power_level > dccg->cur_min_clks_state) {
-               if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
-                       dccg->cur_min_clks_state = level_change_req.power_level;
-       }
-
-       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-               new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-       }
-}
-
-static bool dce_update_dfs_bypass(
-       struct dccg *dccg,
-       struct dc *dc,
-       struct dc_state *context,
-       int requested_clock_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-       struct resource_context *res_ctx = &context->res_ctx;
-       enum signal_type signal_type = SIGNAL_TYPE_NONE;
-       bool was_active = clk_dce->dfs_bypass_active;
-       int i;
-
-       /* Disable DFS bypass by default. */
-       clk_dce->dfs_bypass_active = false;
-
-       /* Check that DFS bypass is available. */
-       if (!clk_dce->dfs_bypass_enabled)
-               goto update;
-
-       /* Check if the requested display clock is below the threshold. */
-       if (requested_clock_khz >= 400000)
-               goto update;
-
-       /* DFS-bypass should only be enabled on single stream setups */
-       if (context->stream_count != 1)
-               goto update;
-
-       /* Check that the stream's signal type is an embedded panel */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (res_ctx->pipe_ctx[i].stream) {
-                       struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
-                       signal_type = pipe_ctx->stream->sink->link->connector_signal;
-                       break;
-               }
-       }
-
-       if (signal_type == SIGNAL_TYPE_EDP ||
-               signal_type == SIGNAL_TYPE_LVDS)
-               clk_dce->dfs_bypass_active = true;
-
-update:
-       /* Update the clock state. We don't need to respect safe_to_lower
-        * because the DFS bypass clock should always be greater than the
-        * current display clock frequency.
-        */
-       if (was_active != clk_dce->dfs_bypass_active) {
-               dccg->clks.dispclk_khz =
-                       dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
-               return true;
-       }
-
-       return false;
-}
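
The four gates above (feature enabled, request below 400 MHz, a single stream, an embedded eDP/LVDS panel) condense into one predicate; a sketch reusing the structures above:

/* Sketch only: same conditions as the early-exit chain above. */
static bool dfs_bypass_wanted(const struct dce_dccg *clk_dce,
                              const struct dc_state *context,
                              int requested_clock_khz,
                              enum signal_type signal)
{
        return clk_dce->dfs_bypass_enabled &&
               requested_clock_khz < 400000 &&
               context->stream_count == 1 &&
               (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
}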
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static const struct display_clock_funcs dcn1_funcs = {
-       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-       .set_dispclk = dce112_set_clock,
-       .update_clocks = dcn1_update_clocks
-};
-#endif
-
-static const struct display_clock_funcs dce120_funcs = {
-       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-       .set_dispclk = dce112_set_clock,
-       .update_clocks = dce12_update_clocks
-};
-
-static const struct display_clock_funcs dce112_funcs = {
-       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-       .set_dispclk = dce112_set_clock,
-       .update_clocks = dce_update_clocks
-};
-
-static const struct display_clock_funcs dce110_funcs = {
-       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-       .set_dispclk = dce_psr_set_clock,
-       .update_clocks = dce_update_clocks,
-       .update_dfs_bypass = dce_update_dfs_bypass
-};
-
-static const struct display_clock_funcs dce_funcs = {
-       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-       .set_dispclk = dce_set_clock,
-       .update_clocks = dce_update_clocks
-};
-
-static void dce_dccg_construct(
-       struct dce_dccg *clk_dce,
-       struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask)
-{
-       struct dccg *base = &clk_dce->base;
-
-       base->ctx = ctx;
-       base->funcs = &dce_funcs;
-
-       clk_dce->regs = regs;
-       clk_dce->clk_shift = clk_shift;
-       clk_dce->clk_mask = clk_mask;
-
-       clk_dce->dfs_bypass_disp_clk = 0;
-
-       clk_dce->dprefclk_ss_percentage = 0;
-       clk_dce->dprefclk_ss_divider = 1000;
-       clk_dce->ss_on_dprefclk = false;
-
-       base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-       base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
-
-       dce_clock_read_integrated_info(clk_dce);
-       dce_clock_read_ss_info(clk_dce);
-}
-
-struct dccg *dce_dccg_create(
-       struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask)
-{
-       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-       if (clk_dce == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
-       memcpy(clk_dce->max_clks_by_state,
-               dce80_max_clks_by_state,
-               sizeof(dce80_max_clks_by_state));
-
-       dce_dccg_construct(
-               clk_dce, ctx, regs, clk_shift, clk_mask);
-
-       return &clk_dce->base;
-}
-
-struct dccg *dce110_dccg_create(
-       struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask)
-{
-       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-       if (clk_dce == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
-       memcpy(clk_dce->max_clks_by_state,
-               dce110_max_clks_by_state,
-               sizeof(dce110_max_clks_by_state));
-
-       dce_dccg_construct(
-               clk_dce, ctx, regs, clk_shift, clk_mask);
-
-       clk_dce->base.funcs = &dce110_funcs;
-
-       return &clk_dce->base;
-}
-
-struct dccg *dce112_dccg_create(
-       struct dc_context *ctx,
-       const struct dccg_registers *regs,
-       const struct dccg_shift *clk_shift,
-       const struct dccg_mask *clk_mask)
-{
-       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-       if (clk_dce == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
-       memcpy(clk_dce->max_clks_by_state,
-               dce112_max_clks_by_state,
-               sizeof(dce112_max_clks_by_state));
-
-       dce_dccg_construct(
-               clk_dce, ctx, regs, clk_shift, clk_mask);
-
-       clk_dce->base.funcs = &dce112_funcs;
-
-       return &clk_dce->base;
-}
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx)
-{
-       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-       if (clk_dce == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
-       memcpy(clk_dce->max_clks_by_state,
-               dce120_max_clks_by_state,
-               sizeof(dce120_max_clks_by_state));
-
-       dce_dccg_construct(
-               clk_dce, ctx, NULL, NULL, NULL);
-
-       clk_dce->dprefclk_khz = 600000;
-       clk_dce->base.funcs = &dce120_funcs;
-
-       return &clk_dce->base;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
-{
-       struct dc_debug_options *debug = &ctx->dc->debug;
-       struct dc_bios *bp = ctx->dc_bios;
-       struct dc_firmware_info fw_info = { { 0 } };
-       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-       if (clk_dce == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
-       clk_dce->base.ctx = ctx;
-       clk_dce->base.funcs = &dcn1_funcs;
-
-       clk_dce->dfs_bypass_disp_clk = 0;
-
-       clk_dce->dprefclk_ss_percentage = 0;
-       clk_dce->dprefclk_ss_divider = 1000;
-       clk_dce->ss_on_dprefclk = false;
-
-       clk_dce->dprefclk_khz = 600000;
-       if (bp->integrated_info)
-               clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
-       if (clk_dce->dentist_vco_freq_khz == 0) {
-               bp->funcs->get_firmware_info(bp, &fw_info);
-               clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
-               if (clk_dce->dentist_vco_freq_khz == 0)
-                       clk_dce->dentist_vco_freq_khz = 3600000;
-       }
-
-       if (!debug->disable_dfs_bypass && bp->integrated_info)
-               if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-                       clk_dce->dfs_bypass_enabled = true;
-
-       dce_clock_read_ss_info(clk_dce);
-
-       return &clk_dce->base;
-}
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
-
-       kfree(clk_dce);
-       *dccg = NULL;
-}
index 64dc75378541539028b7333f22d00329240a3166..c83a7f05f14c11090b90d081f0e338af9c95bc63 100644 (file)
@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
        uint32_t DOMAIN5_PG_CONFIG;
        uint32_t DOMAIN6_PG_CONFIG;
        uint32_t DOMAIN7_PG_CONFIG;
+       uint32_t DOMAIN8_PG_CONFIG;
+       uint32_t DOMAIN9_PG_CONFIG;
+       uint32_t DOMAIN10_PG_CONFIG;
+       uint32_t DOMAIN11_PG_CONFIG;
+       uint32_t DOMAIN16_PG_CONFIG;
+       uint32_t DOMAIN17_PG_CONFIG;
+       uint32_t DOMAIN18_PG_CONFIG;
+       uint32_t DOMAIN19_PG_CONFIG;
+       uint32_t DOMAIN20_PG_CONFIG;
+       uint32_t DOMAIN21_PG_CONFIG;
        uint32_t DOMAIN0_PG_STATUS;
        uint32_t DOMAIN1_PG_STATUS;
        uint32_t DOMAIN2_PG_STATUS;
@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
        uint32_t DOMAIN5_PG_STATUS;
        uint32_t DOMAIN6_PG_STATUS;
        uint32_t DOMAIN7_PG_STATUS;
+       uint32_t DOMAIN8_PG_STATUS;
+       uint32_t DOMAIN9_PG_STATUS;
+       uint32_t DOMAIN10_PG_STATUS;
+       uint32_t DOMAIN11_PG_STATUS;
+       uint32_t DOMAIN16_PG_STATUS;
+       uint32_t DOMAIN17_PG_STATUS;
+       uint32_t DOMAIN18_PG_STATUS;
+       uint32_t DOMAIN19_PG_STATUS;
+       uint32_t DOMAIN20_PG_STATUS;
+       uint32_t DOMAIN21_PG_STATUS;
        uint32_t DIO_MEM_PWR_CTRL;
        uint32_t DCCG_GATE_DISABLE_CNTL;
        uint32_t DCCG_GATE_DISABLE_CNTL2;
@@ -262,6 +282,8 @@ struct dce_hwseq_registers {
        uint32_t D2VGA_CONTROL;
        uint32_t D3VGA_CONTROL;
        uint32_t D4VGA_CONTROL;
+       uint32_t D5VGA_CONTROL;
+       uint32_t D6VGA_CONTROL;
        uint32_t VGA_TEST_CONTROL;
        /* MMHUB registers. read only. temporary hack */
        uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
@@ -489,6 +511,26 @@ struct dce_hwseq_registers {
        type DOMAIN6_POWER_GATE; \
        type DOMAIN7_POWER_FORCEON; \
        type DOMAIN7_POWER_GATE; \
+       type DOMAIN8_POWER_FORCEON; \
+       type DOMAIN8_POWER_GATE; \
+       type DOMAIN9_POWER_FORCEON; \
+       type DOMAIN9_POWER_GATE; \
+       type DOMAIN10_POWER_FORCEON; \
+       type DOMAIN10_POWER_GATE; \
+       type DOMAIN11_POWER_FORCEON; \
+       type DOMAIN11_POWER_GATE; \
+       type DOMAIN16_POWER_FORCEON; \
+       type DOMAIN16_POWER_GATE; \
+       type DOMAIN17_POWER_FORCEON; \
+       type DOMAIN17_POWER_GATE; \
+       type DOMAIN18_POWER_FORCEON; \
+       type DOMAIN18_POWER_GATE; \
+       type DOMAIN19_POWER_FORCEON; \
+       type DOMAIN19_POWER_GATE; \
+       type DOMAIN20_POWER_FORCEON; \
+       type DOMAIN20_POWER_GATE; \
+       type DOMAIN21_POWER_FORCEON; \
+       type DOMAIN21_POWER_GATE; \
        type DOMAIN0_PGFSM_PWR_STATUS; \
        type DOMAIN1_PGFSM_PWR_STATUS; \
        type DOMAIN2_PGFSM_PWR_STATUS; \
@@ -497,6 +539,16 @@ struct dce_hwseq_registers {
        type DOMAIN5_PGFSM_PWR_STATUS; \
        type DOMAIN6_PGFSM_PWR_STATUS; \
        type DOMAIN7_PGFSM_PWR_STATUS; \
+       type DOMAIN8_PGFSM_PWR_STATUS; \
+       type DOMAIN9_PGFSM_PWR_STATUS; \
+       type DOMAIN10_PGFSM_PWR_STATUS; \
+       type DOMAIN11_PGFSM_PWR_STATUS; \
+       type DOMAIN16_PGFSM_PWR_STATUS; \
+       type DOMAIN17_PGFSM_PWR_STATUS; \
+       type DOMAIN18_PGFSM_PWR_STATUS; \
+       type DOMAIN19_PGFSM_PWR_STATUS; \
+       type DOMAIN20_PGFSM_PWR_STATUS; \
+       type DOMAIN21_PGFSM_PWR_STATUS; \
        type DCFCLK_GATE_DIS; \
        type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
        type VGA_TEST_ENABLE; \
index 366bc8c2c643dd0383a4f5643daf06f64da164f3..3e18ea84b1f961ef7ddfd5b701e91e62f5e8852c 100644 (file)
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
                return false;
 
        /* DCE11 HW does not support 420 */
-       if (!enc110->base.features.ycbcr420_supported &&
+       if (!enc110->base.features.hdmi_ycbcr420_supported &&
                        crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                return false;
 
index 74c05e8788073433895536f856ae451e77a37fe8..bc50a8e25f4f2d39ed1e0b3449511aa5e60c6460 100644 (file)
@@ -105,74 +105,18 @@ bool dce100_enable_display_power_gating(
                return false;
 }
 
-static void dce100_pplib_apply_display_requirements(
-       struct dc *dc,
-       struct dc_state *context)
-{
-       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-       pp_display_cfg->avail_mclk_switch_time_us =
-                                               dce110_get_min_vblank_time_us(context);
-       /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-               / MEMORY_TYPE_MULTIPLIER;*/
-
-       dce110_fill_display_configs(context, pp_display_cfg);
-
-       if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-                       struct dm_pp_display_configuration)) !=  0)
-               dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-       dc->prev_display_config = *pp_display_cfg;
-}
-
-/* Units: kHz. Before a mode set, get the pixel clock from the context;
- * the ASIC registers may not be programmed yet.
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-       struct dc *dc,
-       struct dc_state *context)
-{
-       uint32_t max_pix_clk = 0;
-       int i;
-
-       for (i = 0; i < MAX_PIPES; i++) {
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe_ctx->stream == NULL)
-                       continue;
-
-               /* do not check underlay */
-               if (pipe_ctx->top_pipe)
-                       continue;
-
-               if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-                       max_pix_clk =
-                               pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-       }
-       return max_pix_clk;
-}
-
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
                struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
+               struct dc_state *context)
 {
-       struct dc_clocks req_clks;
-
-       req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
-       req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
        dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
-       dc->res_pool->dccg->funcs->update_clocks(
-                       dc->res_pool->dccg,
-                       &req_clks,
-                       decrease_allowed);
-
-       dce100_pplib_apply_display_requirements(dc, context);
+       dc->res_pool->clk_mgr->funcs->update_clocks(
+                       dc->res_pool->clk_mgr,
+                       context,
+                       false);
 }
 
-
 /**************************************************************************/
 
 void dce100_hw_sequencer_construct(struct dc *dc)
@@ -180,8 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
 
        dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
-       dc->hwss.set_bandwidth = dce100_set_bandwidth;
-       dc->hwss.pplib_apply_display_requirements =
-                       dce100_pplib_apply_display_requirements;
+       dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+       dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
 }
 
index c6ec0ed6ec3de0d59fe0a99edee7ae4e14a8391c..acd418515346c49fb263f98ae27995f957f959db 100644 (file)
@@ -33,10 +33,9 @@ struct dc_state;
 
 void dce100_hw_sequencer_construct(struct dc *dc);
 
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
                struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed);
+               struct dc_state *context);
 
 bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
                                        struct dc_bios *dcb,
index 14754a87156c53fe70269aaa9f883b4074bdfd79..6ae51a5dfc049da69d8530879c54eab38225e9c8 100644 (file)
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
                        dce_aud_destroy(&pool->base.audios[i]);
        }
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
        if (pool->base.abm != NULL)
                                dce_abm_destroy(&pool->base.abm);
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
        if (at_least_one_pipe) {
                /* TODO implement when needed but for now hardcode max value*/
                context->bw.dce.dispclk_khz = 681000;
-               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
        } else {
                context->bw.dce.dispclk_khz = 0;
                context->bw.dce.yclk_khz = 0;
@@ -860,7 +860,6 @@ static bool construct(
        struct dc_context *ctx = dc->ctx;
        struct dc_firmware_info info;
        struct dc_bios *bp;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -908,11 +907,11 @@ static bool construct(
                }
        }
 
-       pool->base.dccg = dce_dccg_create(ctx,
+       pool->base.clk_mgr = dce_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -938,12 +937,6 @@ static bool construct(
                goto res_create_fail;
        }
 
-       /* get static clock information for PPLIB or firmware, save
-        * max_clock_state
-        */
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                                       static_clk_info.max_clocks_state;
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
index b75ede5f84f76837960463387a90ca35aa7ac62a..9724a17e352b9286210b96be2b2bfab386478be7 100644 (file)
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 
        regamma_params->hw_points_num = hw_points;
 
-       i = 1;
-       for (k = 0; k < 16 && i < 16; k++) {
+       k = 0;
+       for (i = 1; i < 16; i++) {
                if (seg_distr[k] != -1) {
                        regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
                        regamma_params->arr_curve_points[i].offset =
                                        regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
                }
-               i++;
+               k++;
        }
 
        if (seg_distr[k] != -1)
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 
        if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                link->dc->hwss.edp_backlight_control(link, true);
-               stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
        }
 }
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1192,8 +1191,8 @@ static void build_audio_output(
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
                        pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                audio_output->pll_info.dp_dto_source_clock_in_khz =
-                               state->dis_clk->funcs->get_dp_ref_clk_frequency(
-                                               state->dis_clk);
+                               state->dccg->funcs->get_dp_ref_clk_frequency(
+                                               state->dccg);
        }
 
        audio_output->pll_info.feed_back_divider =
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
        int i;
        struct dc_link *edp_link_to_turnoff = NULL;
        struct dc_link *edp_link = get_link_for_edp(dc);
+       struct dc_bios *bios = dc->ctx->dc_bios;
        bool can_edp_fast_boot_optimize = false;
        bool apply_edp_fast_boot_optimization = false;
 
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                        if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
                                context->streams[i]->apply_edp_fast_boot_optimization = true;
                                apply_edp_fast_boot_optimization = true;
+
+                               /* After S4/S5 the VBIOS may have posted eDP,
+                                * so the previous dpms_off state no longer
+                                * makes sense. Check the VBIOS scratch
+                                * register to align the SW dpms_off state
+                                * with the HW state.
+                                */
+                               if (bios->funcs->is_active_display)     {
+                                       const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
+
+                                       if (bios->funcs->is_active_display(bios,
+                                                       context->streams[i]->signal,
+                                                       device_tag))
+                                               context->streams[i]->dpms_off = false;
+                               }
                        }
                }
        }
@@ -1736,41 +1750,18 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
        if (events->force_trigger)
                value |= 0x1;
 
-       value |= 0x84;
+       if (num_pipes) {
+               struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+               if (dc->fbc_compressor)
+                       value |= 0x84;
+       }
 
        for (i = 0; i < num_pipes; i++)
                pipe_ctx[i]->stream_res.tg->funcs->
                        set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
 }
 
-/* Units: kHz. Before a mode set, get the pixel clock from the context;
- * the ASIC registers may not be programmed yet.
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-       struct dc *dc,
-       struct dc_state *context)
-{
-       uint32_t max_pix_clk = 0;
-       int i;
-
-       for (i = 0; i < MAX_PIPES; i++) {
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe_ctx->stream == NULL)
-                       continue;
-
-               /* do not check underlay */
-               if (pipe_ctx->top_pipe)
-                       continue;
-
-               if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-                       max_pix_clk =
-                               pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-       }
-
-       return max_pix_clk;
-}
-
 /*
  *  Check if FBC can be enabled
  */
@@ -2380,191 +2371,33 @@ static void init_hw(struct dc *dc)
 
 }
 
-void dce110_fill_display_configs(
-       const struct dc_state *context,
-       struct dm_pp_display_configuration *pp_display_cfg)
-{
-       int j;
-       int num_cfgs = 0;
-
-       for (j = 0; j < context->stream_count; j++) {
-               int k;
-
-               const struct dc_stream_state *stream = context->streams[j];
-               struct dm_pp_single_disp_config *cfg =
-                       &pp_display_cfg->disp_configs[num_cfgs];
-               const struct pipe_ctx *pipe_ctx = NULL;
-
-               for (k = 0; k < MAX_PIPES; k++)
-                       if (stream == context->res_ctx.pipe_ctx[k].stream) {
-                               pipe_ctx = &context->res_ctx.pipe_ctx[k];
-                               break;
-                       }
-
-               ASSERT(pipe_ctx != NULL);
-
-               /* only notify active stream */
-               if (stream->dpms_off)
-                       continue;
-
-               num_cfgs++;
-               cfg->signal = pipe_ctx->stream->signal;
-               cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
-               cfg->src_height = stream->src.height;
-               cfg->src_width = stream->src.width;
-               cfg->ddi_channel_mapping =
-                       stream->sink->link->ddi_channel_mapping.raw;
-               cfg->transmitter =
-                       stream->sink->link->link_enc->transmitter;
-               cfg->link_settings.lane_count =
-                       stream->sink->link->cur_link_settings.lane_count;
-               cfg->link_settings.link_rate =
-                       stream->sink->link->cur_link_settings.link_rate;
-               cfg->link_settings.link_spread =
-                       stream->sink->link->cur_link_settings.link_spread;
-               cfg->sym_clock = stream->phy_pix_clk;
-               /* Round v_refresh*/
-               cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
-               cfg->v_refresh /= stream->timing.h_total;
-               cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
-                                                       / stream->timing.v_total;
-       }
-
-       pp_display_cfg->display_count = num_cfgs;
-}
-
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
-{
-       uint8_t j;
-       uint32_t min_vertical_blank_time = -1;
-
-       for (j = 0; j < context->stream_count; j++) {
-               struct dc_stream_state *stream = context->streams[j];
-               uint32_t vertical_blank_in_pixels = 0;
-               uint32_t vertical_blank_time = 0;
-
-               vertical_blank_in_pixels = stream->timing.h_total *
-                       (stream->timing.v_total
-                        - stream->timing.v_addressable);
-
-               vertical_blank_time = vertical_blank_in_pixels
-                       * 1000 / stream->timing.pix_clk_khz;
-
-               if (min_vertical_blank_time > vertical_blank_time)
-                       min_vertical_blank_time = vertical_blank_time;
-       }
-
-       return min_vertical_blank_time;
-}
-
-static int determine_sclk_from_bounding_box(
-               const struct dc *dc,
-               int required_sclk)
-{
-       int i;
 
-       /*
-        * Some asics do not give us sclk levels, so we just report the actual
-        * required sclk
-        */
-       if (dc->sclk_lvls.num_levels == 0)
-               return required_sclk;
-
-       for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
-               if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
-                       return dc->sclk_lvls.clocks_in_khz[i];
-       }
-       /*
-        * even maximum level could not satisfy requirement, this
-        * is unexpected at this stage, should have been caught at
-        * validation time
-        */
-       ASSERT(0);
-       return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
-}
-
-static void pplib_apply_display_requirements(
-       struct dc *dc,
-       struct dc_state *context)
+void dce110_prepare_bandwidth(
+               struct dc *dc,
+               struct dc_state *context)
 {
-       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+       struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 
-       pp_display_cfg->all_displays_in_sync =
-               context->bw.dce.all_displays_in_sync;
-       pp_display_cfg->nb_pstate_switch_disable =
-                       context->bw.dce.nbp_state_change_enable == false;
-       pp_display_cfg->cpu_cc6_disable =
-                       context->bw.dce.cpuc_state_change_enable == false;
-       pp_display_cfg->cpu_pstate_disable =
-                       context->bw.dce.cpup_state_change_enable == false;
-       pp_display_cfg->cpu_pstate_separation_time =
-                       context->bw.dce.blackout_recovery_time_us;
+       dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
-       pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-               / MEMORY_TYPE_MULTIPLIER;
-
-       pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
-                       dc,
-                       context->bw.dce.sclk_khz);
-
-       pp_display_cfg->min_engine_clock_deep_sleep_khz
-                       = context->bw.dce.sclk_deep_sleep_khz;
-
-       pp_display_cfg->avail_mclk_switch_time_us =
-                                               dce110_get_min_vblank_time_us(context);
-       /* TODO: dce11.2*/
-       pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
-       pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-
-       dce110_fill_display_configs(context, pp_display_cfg);
-
-       /* TODO: is this still applicable?*/
-       if (pp_display_cfg->display_count == 1) {
-               const struct dc_crtc_timing *timing =
-                       &context->streams[0]->timing;
-
-               pp_display_cfg->crtc_index =
-                       pp_display_cfg->disp_configs[0].pipe_idx;
-               pp_display_cfg->line_time_in_us = timing->h_total * 1000
-                                                       / timing->pix_clk_khz;
-       }
-
-       if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-                       struct dm_pp_display_configuration)) !=  0)
-               dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-       dc->prev_display_config = *pp_display_cfg;
+       dccg->funcs->update_clocks(
+                       dccg,
+                       context,
+                       false);
 }
 
-static void dce110_set_bandwidth(
+void dce110_optimize_bandwidth(
                struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
+               struct dc_state *context)
 {
-       struct dc_clocks req_clks;
-       struct dccg *dccg = dc->res_pool->dccg;
-
-       req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
-       req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
-       if (decrease_allowed)
-               dce110_set_displaymarks(dc, context);
-       else
-               dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+       struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 
-       if (dccg->funcs->update_dfs_bypass)
-               dccg->funcs->update_dfs_bypass(
-                       dccg,
-                       dc,
-                       context,
-                       req_clks.dispclk_khz);
+       dce110_set_displaymarks(dc, context);
 
        dccg->funcs->update_clocks(
                        dccg,
-                       &req_clks,
-                       decrease_allowed);
-       pplib_apply_display_requirements(dc, context);
+                       context,
+                       true);
 }
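
This hunk, together with the dce100 one above, replaces the single set_bandwidth(dc, context, decrease_allowed) hook with a prepare/optimize pair: prepare_bandwidth sets safe watermarks and calls update_clocks() with safe_to_lower false before the pipes are programmed, while optimize_bandwidth sets the real watermarks and passes true afterwards. The assumed ordering around a state commit (sketch; the caller is not part of this hunk):

/* Assumed commit-path ordering (sketch):
 *
 *   dc->hwss.prepare_bandwidth(dc, context);   raise clocks, safe marks
 *   ...program pipes / apply the new context...
 *   dc->hwss.optimize_bandwidth(dc, context);  real marks, allow lowering
 */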
 
 static void dce110_program_front_end_for_pipe(
@@ -2769,28 +2602,6 @@ static void dce110_wait_for_mpcc_disconnect(
        /* do nothing*/
 }
 
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
-               enum dc_color_space colorspace,
-               uint16_t *matrix)
-{
-       int i;
-       struct out_csc_color_matrix tbl_entry;
-
-       if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
-                               == true) {
-                       enum dc_color_space color_space =
-                               pipe_ctx->stream->output_color_space;
-
-                       //uint16_t matrix[12];
-                       for (i = 0; i < 12; i++)
-                               tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
-                       tbl_entry.color_space = color_space;
-                       //tbl_entry.regval = matrix;
-                       pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
-       }
-}
-
 void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
        struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2839,13 +2650,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
                                pipe_ctx->plane_res.xfm, attributes);
 }
 
-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
-
-static void optimize_shared_resources(struct dc *dc) {}
-
 static const struct hw_sequencer_funcs dce110_funcs = {
        .program_gamut_remap = program_gamut_remap,
-       .program_csc_matrix = program_csc_matrix,
        .init_hw = init_hw,
        .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
        .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
@@ -2868,7 +2674,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .enable_display_power_gating = dce110_enable_display_power_gating,
        .disable_plane = dce110_power_down_fe,
        .pipe_control_lock = dce_pipe_control_lock,
-       .set_bandwidth = dce110_set_bandwidth,
+       .prepare_bandwidth = dce110_prepare_bandwidth,
+       .optimize_bandwidth = dce110_optimize_bandwidth,
        .set_drr = set_drr,
        .get_position = get_position,
        .set_static_screen_control = set_static_screen_control,
@@ -2877,9 +2684,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .setup_stereo = NULL,
        .set_avmute = dce110_set_avmute,
        .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
-       .ready_shared_resources = ready_shared_resources,
-       .optimize_shared_resources = optimize_shared_resources,
-       .pplib_apply_display_requirements = pplib_apply_display_requirements,
        .edp_backlight_control = hwss_edp_backlight_control,
        .edp_power_control = hwss_edp_power_control,
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
index d6db3dbd90153ba4a3f9511eb494552b143b4255..cd3e36d52a5239512fd2d7edbcfbbe53743b0fa4 100644 (file)
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
                struct dc_state *context);
 
 
-
 void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
 
 void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks(
                struct resource_context *res_ctx,
                const struct resource_pool *pool);
 
-void dce110_fill_display_configs(
-       const struct dc_state *context,
-       struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_prepare_bandwidth(
+               struct dc *dc,
+               struct dc_state *context);
 
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+void dce110_optimize_bandwidth(
+               struct dc *dc,
+               struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
 
index e3624ca24574827a684a3c96dc257b2a4c19f215..e33d11785b1fd11300c45579178887e95d223a6b 100644 (file)
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "dce110/dce110_resource.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "include/irq_service_interface.h"
 #include "dce/dce_audio.h"
 #include "dce110/dce110_timing_generator.h"
@@ -45,7 +46,6 @@
 #include "dce110/dce110_transform_v.h"
 #include "dce/dce_opp.h"
 #include "dce110/dce110_opp_v.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
 #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        &clks);
 
        dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-               clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+               clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
        dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-               clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+               clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
        dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-               clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+               clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
 }
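For reference, the arithmetic these hunks rename but do not change: YCLK = UMACLK * memory-type multiplier, and bw_frc_to_fixed(n, d) builds the fixed-point value n/d, so the division by 1000 turns kHz into MHz. A sketch, under the assumption that MEMORY_TYPE_MULTIPLIER_CZ is 4:

	/* hypothetical helper; the multiplier value is an assumption */
	static struct bw_fixed yclk_from_umaclk_khz(int64_t umaclk_khz)
	{
		return bw_frc_to_fixed(umaclk_khz * 4 /* MEMORY_TYPE_MULTIPLIER_CZ */,
				       1000); /* kHz -> MHz, fixed point */
	}
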
 
@@ -1201,7 +1201,6 @@ static bool construct(
        struct dc_context *ctx = dc->ctx;
        struct dc_firmware_info info;
        struct dc_bios *bp;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -1257,11 +1256,11 @@ static bool construct(
                }
        }
 
-       pool->base.dccg = dce110_dccg_create(ctx,
+       pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1287,13 +1286,6 @@ static bool construct(
                goto res_create_fail;
        }
 
-       /* get static clock information for PPLIB or firmware, save
-        * max_clock_state
-        */
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                               static_clk_info.max_clocks_state;
-
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
@@ -1362,7 +1354,8 @@ static bool construct(
                pool->base.sw_i2cs[i] = NULL;
        }
 
-       dc->fbc_compressor = dce110_compressor_create(ctx);
+       if (dc->config.fbc_support)
+               dc->fbc_compressor = dce110_compressor_create(ctx);
 
        if (!underlay_create(ctx, &pool->base))
                goto res_create_fail;
index 3ce79c208ddfb8297766e9453da3097a78e022f6..969d4e72dc94266a77b601f94d6ffba7035f4a50 100644 (file)
@@ -35,6 +35,7 @@
 
 #include "irq/dce110/irq_service_dce110.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_link_encoder.h"
@@ -42,7 +43,6 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_ipp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 
 #include "dce/dce_hwseq.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
 static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
                .max_hdmi_pixel_clock = 600000,
-               .ycbcr420_supported = true,
+               .hdmi_ycbcr420_supported = true,
+               .dp_ycbcr420_supported = false,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_HBR3_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true,
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                                &clks);
 
                dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-                       clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+                       clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
                dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-                       clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+                       clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
                        1000);
                dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-                       clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+                       clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
                        1000);
 
                return;
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
         * YCLK = UMACLK*m_memoryTypeMultiplier
         */
        dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-               mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+               mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
        dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-               mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+               mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
        dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-               mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+               mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
 
        /* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1131,7 +1132,6 @@ static bool construct(
 {
        unsigned int i;
        struct dc_context *ctx = dc->ctx;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -1199,11 +1199,11 @@ static bool construct(
                }
        }
 
-       pool->base.dccg = dce112_dccg_create(ctx,
+       pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1229,13 +1229,6 @@ static bool construct(
                goto res_create_fail;
        }
 
-       /* get static clock information for PPLIB or firmware, save
-        * max_clock_state
-        */
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                               static_clk_info.max_clocks_state;
-
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
index 79ab5f9f9115640fa85b4b079e8887b0f3a61232..f12696674eb0cb779f481ad4569ecca85825b069 100644 (file)
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dce120_resource.h"
+
 #include "dce112/dce112_resource.h"
 
 #include "dce110/dce110_resource.h"
@@ -39,7 +40,6 @@
 #include "irq/dce120/irq_service_dce120.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_clock_source.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_mem_input.h"
 
@@ -47,6 +47,7 @@
 #include "dce120/dce120_hw_sequencer.h"
 #include "dce/dce_transform.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 }
 
 static void read_dce_straps(
@@ -606,7 +607,8 @@ static struct audio *create_audio(
 static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
                .max_hdmi_pixel_clock = 600000,
-               .ycbcr420_supported = true,
+               .hdmi_ycbcr420_supported = true,
+               .dp_ycbcr420_supported = false,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_HBR3_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true,
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
         * YCLK = UMACLK*m_memoryTypeMultiplier
         */
        dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-               mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+               mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
        dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-               mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+               mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
        dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-               mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+               mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
                1000);
 
        /* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -973,8 +975,8 @@ static bool construct(
                }
        }
 
-       pool->base.dccg = dce120_dccg_create(ctx);
-       if (pool->base.dccg == NULL) {
+       pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto dccg_create_fail;
index 6c6a1a16af19f0377fe78dfa2af4aa69416aba21..a60a90e68d91837d67c9331217f7bdbada9f7b07 100644 (file)
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
 
        dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
        dc->hwss.pipe_control_lock = dce_pipe_control_lock;
-       dc->hwss.set_bandwidth = dce100_set_bandwidth;
+       dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+       dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
 }
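Note that DCE8 reuses dce100_prepare_bandwidth for both hooks here. A dedicated optimize path, were one added, would presumably mirror the DCE11 version earlier in this series, along these lines (a sketch, not code from this patch):

	/* hypothetical: lower clocks and program the real watermarks only
	 * after the new state has been committed (safe_to_lower = true) */
	static void example_optimize_bandwidth(struct dc *dc, struct dc_state *context)
	{
		dce110_set_displaymarks(dc, context);
		dc->res_pool->clk_mgr->funcs->update_clocks(
				dc->res_pool->clk_mgr, context, true);
	}
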
 
index d68f951f98694b5e68c27152e7728d7c549ffab8..6d40b3d54ac154e18e2ec8226041497f1bc5b75d 100644 (file)
@@ -37,6 +37,7 @@
 #include "dce110/dce110_timing_generator.h"
 #include "dce110/dce110_resource.h"
 #include "dce80/dce80_timing_generator.h"
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
@@ -44,7 +45,6 @@
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool)
                }
        }
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -793,7 +793,7 @@ bool dce80_validate_bandwidth(
 {
        /* TODO: implement when needed; for now hard-code the max values */
        context->bw.dce.dispclk_khz = 681000;
-       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
 
        return true;
 }
@@ -855,7 +855,6 @@ static bool dce80_construct(
        struct dc_context *ctx = dc->ctx;
        struct dc_firmware_info info;
        struct dc_bios *bp;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -918,11 +917,11 @@ static bool dce80_construct(
                }
        }
 
-       pool->base.dccg = dce_dccg_create(ctx,
+       pool->base.clk_mgr = dce_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -948,10 +947,6 @@ static bool dce80_construct(
                goto res_create_fail;
        }
 
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                                       static_clk_info.max_clocks_state;
-
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
@@ -1065,7 +1060,6 @@ static bool dce81_construct(
        struct dc_context *ctx = dc->ctx;
        struct dc_firmware_info info;
        struct dc_bios *bp;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -1128,11 +1122,11 @@ static bool dce81_construct(
                }
        }
 
-       pool->base.dccg = dce_dccg_create(ctx,
+       pool->base.clk_mgr = dce_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1158,10 +1152,6 @@ static bool dce81_construct(
                goto res_create_fail;
        }
 
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                                       static_clk_info.max_clocks_state;
-
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
@@ -1275,7 +1265,6 @@ static bool dce83_construct(
        struct dc_context *ctx = dc->ctx;
        struct dc_firmware_info info;
        struct dc_bios *bp;
-       struct dm_pp_static_clock_info static_clk_info = {0};
 
        ctx->dc_bios->regs = &bios_regs;
 
@@ -1334,11 +1323,11 @@ static bool dce83_construct(
                }
        }
 
-       pool->base.dccg = dce_dccg_create(ctx,
+       pool->base.clk_mgr = dce_clk_mgr_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.dccg == NULL) {
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1364,10 +1353,6 @@ static bool dce83_construct(
                goto res_create_fail;
        }
 
-       if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.dccg->max_clks_state =
-                                       static_clk_info.max_clocks_state;
-
        {
                struct irq_service_init_data init_data;
                init_data.ctx = dc->ctx;
index 032f872be89c8bd72b38959f773c04d0e988091c..55f293c8a3c057ecc6b79fb93bc40c6a4deb2572 100644 (file)
@@ -24,7 +24,7 @@
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
                dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
-               dcn10_hubp.o dcn10_mpc.o \
+               dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
                dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
                dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
new file mode 100644 (file)
index 0000000..20f531d
--- /dev/null
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+       container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+       (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+       clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+       clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+       clk_mgr->ctx->logger
+
+void dcn1_pplib_apply_display_requirements(
+       struct dc *dc,
+       struct dc_state *context)
+{
+       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+       pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+       pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
+       pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+       pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+       pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+       pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+       dce110_fill_display_configs(context, pp_display_cfg);
+
+       dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+       bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+       int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+       bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+
+       /* increase clock: two steps are needed only when current div is 0 and requested div is 1 */
+       if (dispclk_increase) {
+               /* already divided by 2; no need to reach the target clock in two steps */
+               if (cur_dpp_div)
+                       return new_clocks->dispclk_khz;
+
+               /* requested disp clock is at or below the maximum supported dpp
+                * clock; no need to reach the target clock in two steps.
+                */
+               if (new_clocks->dispclk_khz <= disp_clk_threshold)
+                       return new_clocks->dispclk_khz;
+
+               /* requested dpp clock needs no divide-by-2; still within the threshold */
+               if (!request_dpp_div)
+                       return new_clocks->dispclk_khz;
+
+       } else {
+               /* decrease clock: two steps are needed only when the current
+                * dppclk is divided by 2 and the requested dppclk is not.
+                */
+
+               /* current dpp clock is not divided by 2; no need to ramp */
+               if (!cur_dpp_div)
+                       return new_clocks->dispclk_khz;
+
+               /* current disp clock is at or below the maximum supported dpp
+                * clock; no need to ramp
+                */
+               if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+                       return new_clocks->dispclk_khz;
+
+               /* requested dpp clock needs to be divided by 2 */
+               if (request_dpp_div)
+                       return new_clocks->dispclk_khz;
+       }
+
+       return disp_clk_threshold;
+}
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+       struct dc *dc = clk_mgr->ctx->dc;
+       int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
+       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+       int i;
+
+       /* set disp clk to dpp clk threshold */
+       dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+       /* update the requested dpp clock divider option */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (!pipe_ctx->plane_state)
+                       continue;
+
+               pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+                               pipe_ctx->plane_res.dpp,
+                               request_dpp_div,
+                               true);
+       }
+
+       /* if the target clock differs from the dppclk threshold, set it now */
+       if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+               dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
+
+       clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+       clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+       clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static int get_active_display_cnt(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       int i, display_count;
+
+       display_count = 0;
+       for (i = 0; i < context->stream_count; i++) {
+               const struct dc_stream_state *stream = context->streams[i];
+
+               /*
+                * Only count active streams and virtual streams; virtual
+                * streams must be included to work around the headless
+                * case, since HPD does not fire while the system is in
+                * S0i2.
+                */
+               if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+                       display_count++;
+       }
+
+       return display_count;
+}
+
+static void notify_deep_sleep_dcfclk_to_smu(
+               struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+{
+       int min_dcef_deep_sleep_clk_mhz; // minimum required DCEF Deep Sleep clock in MHz
+       /*
+        * If the function pointer is not set up, this message is sent
+        * as part of pplib_apply_display_requirements, so just return.
+        */
+       if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+               return;
+
+       min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+       pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+}
+
+static void notify_hard_min_dcfclk_to_smu(
+               struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+{
+       int min_dcf_clk_mhz; // minimum required DCF clock in MHz
+
+       /*
+        * If the function pointer is not set up, this message is sent
+        * as part of pplib_apply_display_requirements, so just return.
+        */
+       if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+               return;
+
+       min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+
+       pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+}
+
+static void notify_hard_min_fclk_to_smu(
+               struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
+{
+       int min_f_clk_mhz; // minimum required F clock in MHz
+
+       /*
+        * If the function pointer is not set up, this message is sent
+        * as part of pplib_apply_display_requirements, so just return.
+        */
+       if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
+               return;
+
+       min_f_clk_mhz = min_f_clk_khz / 1000;
+
+       pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
+}
+
+static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
+                       bool safe_to_lower)
+{
+       struct dc *dc = clk_mgr->ctx->dc;
+       struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+       struct pp_smu_display_requirement_rv *smu_req_cur =
+                       &dc->res_pool->pp_smu_req;
+       struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+       struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+       struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+       bool send_request_to_increase = false;
+       bool send_request_to_lower = false;
+       int display_count;
+
+       bool enter_display_off = false;
+
+       display_count = get_active_display_cnt(dc, context);
+
+       if (display_count == 0)
+               enter_display_off = true;
+
+       if (enter_display_off == safe_to_lower) {
+               /*
+                * Notify the SMU of the active display count. If the
+                * function pointer is not set up, this message is sent
+                * as part of pplib_apply_display_requirements.
+                */
+               if (pp_smu->set_display_count)
+                       pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+               else
+                       smu_req.display_count = display_count;
+
+       }
+
+       if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
+                       || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
+                       || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
+                       || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+               send_request_to_increase = true;
+
+       if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+               clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+               send_request_to_lower = true;
+       }
+
+       // F Clock
+       if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
+               clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+               clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+               smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
+
+               notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
+
+               send_request_to_lower = true;
+       }
+
+       // DCF Clock
+       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+               clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+               smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
+
+               send_request_to_lower = true;
+       }
+
+       if (should_set_clock(safe_to_lower,
+                       new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+               clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+               smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+
+               send_request_to_lower = true;
+       }
+
+       /* raise the dcf clock before the dpp clock to make sure there is
+        * enough voltage to run the dpp clock
+        */
+       if (send_request_to_increase) {
+               /*use dcfclk to request voltage*/
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+               notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+
+               if (pp_smu->set_display_requirement)
+                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+               notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+               dcn1_pplib_apply_display_requirements(dc, context);
+       }
+
+       /* dcn1 dppclk is tied to dispclk */
+       /* program dispclk even when it is unchanged (==) as a w/a for sleep/resume clock ramping issues */
+       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
+                       || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
+               dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
+               clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+               send_request_to_lower = true;
+       }
+
+       if (!send_request_to_increase && send_request_to_lower) {
+               /*use dcfclk to request voltage*/
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+               notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+
+               if (pp_smu->set_display_requirement)
+                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+               notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+               dcn1_pplib_apply_display_requirements(dc, context);
+       }
+
+       *smu_req_cur = smu_req;
+}
+
+static const struct clk_mgr_funcs dcn1_funcs = {
+       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .update_clocks = dcn1_update_clocks
+};
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+{
+       struct dc_debug_options *debug = &ctx->dc->debug;
+       struct dc_bios *bp = ctx->dc_bios;
+       struct dc_firmware_info fw_info = { { 0 } };
+       struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+       if (clk_mgr_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       clk_mgr_dce->base.ctx = ctx;
+       clk_mgr_dce->base.funcs = &dcn1_funcs;
+
+       clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+       clk_mgr_dce->dprefclk_ss_percentage = 0;
+       clk_mgr_dce->dprefclk_ss_divider = 1000;
+       clk_mgr_dce->ss_on_dprefclk = false;
+
+       clk_mgr_dce->dprefclk_khz = 600000;
+       if (bp->integrated_info)
+               clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+       if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+               bp->funcs->get_firmware_info(bp, &fw_info);
+               clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+               if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+                       clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+       }
+
+       if (!debug->disable_dfs_bypass && bp->integrated_info)
+               if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+                       clk_mgr_dce->dfs_bypass_enabled = true;
+
+       dce_clock_read_ss_info(clk_mgr_dce);
+
+       return &clk_mgr_dce->base;
+}
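
dcn1_update_clocks() gates every clock write behind should_set_clock(). That helper lives in dce_clk_mgr.h; its semantics amount to "raise immediately, lower only when the caller says it is safe", roughly:

	/* assumed definition, consistent with every call site above */
	static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
	{
		return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
	}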
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
new file mode 100644 (file)
index 0000000..9dbaf65
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN10_CLK_MGR_H__
+#define __DCN10_CLK_MGR_H__
+
+#include "../dce/dce_clk_mgr.h"
+
+void dcn1_pplib_apply_display_requirements(
+       struct dc *dc,
+       struct dc_state *context);
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
+
+#endif //__DCN10_CLK_MGR_H__
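
A DCN1 resource pool would adopt this the same way the DCE pools were converted above; a hedged wiring sketch for its construct():

	/* assumed, mirroring the dce110/dce112 construct() hunks */
	pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
	if (pool->base.clk_mgr == NULL) {
		dm_error("DC: failed to create display clock!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}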
index 5d95a997fd9f96e5539cf6e91fa3d05483cf9a8f..3eea44092a04949eb3ae8deaf8ee41b5e472b6a4 100644 (file)
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func(
        unsigned int i = 0;
 
        REG_SET_2(reg->start_cntl_b, 0,
-                       exp_region_start, params->arr_points[0].custom_float_x,
+                       exp_region_start, params->corner_points[0].blue.custom_float_x,
                        exp_resion_start_segment, 0);
        REG_SET_2(reg->start_cntl_g, 0,
-                       exp_region_start, params->arr_points[0].custom_float_x,
+                       exp_region_start, params->corner_points[0].green.custom_float_x,
                        exp_resion_start_segment, 0);
        REG_SET_2(reg->start_cntl_r, 0,
-                       exp_region_start, params->arr_points[0].custom_float_x,
+                       exp_region_start, params->corner_points[0].red.custom_float_x,
                        exp_resion_start_segment, 0);
 
        REG_SET(reg->start_slope_cntl_b, 0,
-                       field_region_linear_slope, params->arr_points[0].custom_float_slope);
+                       field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
        REG_SET(reg->start_slope_cntl_g, 0,
-                       field_region_linear_slope, params->arr_points[0].custom_float_slope);
+                       field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
        REG_SET(reg->start_slope_cntl_r, 0,
-                       field_region_linear_slope, params->arr_points[0].custom_float_slope);
+                       field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
 
        REG_SET(reg->start_end_cntl1_b, 0,
-                       field_region_end, params->arr_points[1].custom_float_x);
+                       field_region_end, params->corner_points[1].blue.custom_float_x);
        REG_SET_2(reg->start_end_cntl2_b, 0,
-                       field_region_end_slope, params->arr_points[1].custom_float_slope,
-                       field_region_end_base, params->arr_points[1].custom_float_y);
+                       field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+                       field_region_end_base, params->corner_points[1].blue.custom_float_y);
 
        REG_SET(reg->start_end_cntl1_g, 0,
-                       field_region_end, params->arr_points[1].custom_float_x);
+                       field_region_end, params->corner_points[1].green.custom_float_x);
        REG_SET_2(reg->start_end_cntl2_g, 0,
-                       field_region_end_slope, params->arr_points[1].custom_float_slope,
-               field_region_end_base, params->arr_points[1].custom_float_y);
+                       field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+               field_region_end_base, params->corner_points[1].green.custom_float_y);
 
        REG_SET(reg->start_end_cntl1_r, 0,
-                       field_region_end, params->arr_points[1].custom_float_x);
+                       field_region_end, params->corner_points[1].red.custom_float_x);
        REG_SET_2(reg->start_end_cntl2_r, 0,
-                       field_region_end_slope, params->arr_points[1].custom_float_slope,
-               field_region_end_base, params->arr_points[1].custom_float_y);
+                       field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+               field_region_end_base, params->corner_points[1].red.custom_float_y);
 
        for (reg_region_cur = reg->region_start;
                        reg_region_cur <= reg->region_end;
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
                struct pwl_result_data *rgb_resulted,
-               struct curve_points *arr_points,
+               struct curve_points3 *corner_points,
                uint32_t hw_points_num,
                bool fixpoint)
 {
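
curve_points3 replaces the single shared pair of curve_points with one set per color channel; its assumed shape, consistent with every access below, is simply:

	/* per-channel gamma corner points */
	struct curve_points3 {
		struct curve_points red;
		struct curve_points green;
		struct curve_points blue;
	};
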
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float(
        fmt.mantissa_bits = 12;
        fmt.sign = false;
 
-       if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
-                                           &arr_points[0].custom_float_x)) {
+       /* corner_points[0] - start base, slope and offset for R, G, B
+        * corner_points[1] - end base, slope and offset for R, G, B
+        */
+       if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+                               &corner_points[0].red.custom_float_x)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+                               &corner_points[0].green.custom_float_x)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+                               &corner_points[0].blue.custom_float_x)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
 
-       if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
-                                           &arr_points[0].custom_float_offset)) {
+       if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+                               &corner_points[0].red.custom_float_offset)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+                               &corner_points[0].green.custom_float_offset)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+                               &corner_points[0].blue.custom_float_offset)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
 
-       if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
-                                           &arr_points[0].custom_float_slope)) {
+       if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+                               &corner_points[0].red.custom_float_slope)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+                               &corner_points[0].green.custom_float_slope)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+                               &corner_points[0].blue.custom_float_slope)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float(
        fmt.mantissa_bits = 10;
        fmt.sign = false;
 
-       if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
-                                           &arr_points[1].custom_float_x)) {
+       if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+                               &corner_points[1].red.custom_float_x)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
-
-       if (fixpoint == true)
-               arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
-       else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
-               &arr_points[1].custom_float_y)) {
+       if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+                               &corner_points[1].green.custom_float_x)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+                               &corner_points[1].blue.custom_float_x)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
 
-       if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
-                                           &arr_points[1].custom_float_slope)) {
+       if (fixpoint == true) {
+               corner_points[1].red.custom_float_y =
+                               dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+               corner_points[1].green.custom_float_y =
+                               dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+               corner_points[1].blue.custom_float_y =
+                               dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+       } else {
+               if (!convert_to_custom_float_format(corner_points[1].red.y,
+                               &fmt, &corner_points[1].red.custom_float_y)) {
+                       BREAK_TO_DEBUGGER();
+                       return false;
+               }
+               if (!convert_to_custom_float_format(corner_points[1].green.y,
+                               &fmt, &corner_points[1].green.custom_float_y)) {
+                       BREAK_TO_DEBUGGER();
+                       return false;
+               }
+               if (!convert_to_custom_float_format(corner_points[1].blue.y,
+                               &fmt, &corner_points[1].blue.custom_float_y)) {
+                       BREAK_TO_DEBUGGER();
+                       return false;
+               }
+       }
+
+       if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+                               &corner_points[1].red.custom_float_slope)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+                               &corner_points[1].green.custom_float_slope)) {
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+       if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+                               &corner_points[1].blue.custom_float_slope)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format(
                                const struct dc_transfer_func *output_tf,
                                struct pwl_params *lut_params, bool fixpoint)
 {
-       struct curve_points *arr_points;
+       struct curve_points3 *corner_points;
        struct pwl_result_data *rgb_resulted;
        struct pwl_result_data *rgb;
        struct pwl_result_data *rgb_plus_1;
-       struct fixed31_32 y_r;
-       struct fixed31_32 y_g;
-       struct fixed31_32 y_b;
-       struct fixed31_32 y1_min;
-       struct fixed31_32 y3_max;
 
        int32_t region_start, region_end;
        int32_t i;
@@ -261,14 +326,14 @@ bool cm_helper_translate_curve_to_hw_format(
 
        PERF_TRACE();
 
-       arr_points = lut_params->arr_points;
+       corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
 
        memset(lut_params, 0, sizeof(struct pwl_params));
        memset(seg_distr, 0, sizeof(seg_distr));
 
-       if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+       if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
                /* 32 segments
                 * segments are from 2^-25 to 2^7
                 */
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-       arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+       // all three color channels share the same x
+       corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
-       arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
-                                            dc_fixpt_from_int(region_end));
+       corner_points[0].green.x = corner_points[0].red.x;
+       corner_points[0].blue.x = corner_points[0].red.x;
 
-       y_r = rgb_resulted[0].red;
-       y_g = rgb_resulted[0].green;
-       y_b = rgb_resulted[0].blue;
+       corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+                                            dc_fixpt_from_int(region_end));
+       corner_points[1].green.x = corner_points[1].red.x;
+       corner_points[1].blue.x = corner_points[1].red.x;
 
-       y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+       corner_points[0].red.y = rgb_resulted[0].red;
+       corner_points[0].green.y = rgb_resulted[0].green;
+       corner_points[0].blue.y = rgb_resulted[0].blue;
 
-       arr_points[0].y = y1_min;
-       arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-       y_r = rgb_resulted[hw_points - 1].red;
-       y_g = rgb_resulted[hw_points - 1].green;
-       y_b = rgb_resulted[hw_points - 1].blue;
+       corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+                       corner_points[0].red.x);
+       corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+                       corner_points[0].green.x);
+       corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+                       corner_points[0].blue.x);
 
        /* see comment above, m_arrPoints[1].y should be the Y value for the
         * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
         */
-       y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-       arr_points[1].y = y3_max;
-
-       arr_points[1].slope = dc_fixpt_zero;
+       corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+       corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+       corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+       corner_points[1].red.slope = dc_fixpt_zero;
+       corner_points[1].green.slope = dc_fixpt_zero;
+       corner_points[1].blue.slope = dc_fixpt_zero;
 
        if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
                /* for PQ, we want to have a straight line from last HW X point,
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format(
                const struct fixed31_32 end_value =
                                dc_fixpt_from_int(125);
 
-               arr_points[1].slope = dc_fixpt_div(
-                       dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-                       dc_fixpt_sub(end_value, arr_points[1].x));
+               corner_points[1].red.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+                       dc_fixpt_sub(end_value, corner_points[1].red.x));
+               corner_points[1].green.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+                       dc_fixpt_sub(end_value, corner_points[1].green.x));
+               corner_points[1].blue.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+                       dc_fixpt_sub(end_value, corner_points[1].blue.x));
        }
 
        lut_params->hw_points_num = hw_points;
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format(
                ++i;
        }
        cm_helper_convert_to_custom_float(rgb_resulted,
-                                               lut_params->arr_points,
+                                               lut_params->corner_points,
                                                hw_points, fixpoint);
 
        return true;
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
                                const struct dc_transfer_func *output_tf,
                                struct pwl_params *lut_params)
 {
-       struct curve_points *arr_points;
+       struct curve_points3 *corner_points;
        struct pwl_result_data *rgb_resulted;
        struct pwl_result_data *rgb;
        struct pwl_result_data *rgb_plus_1;
-       struct fixed31_32 y_r;
-       struct fixed31_32 y_g;
-       struct fixed31_32 y_b;
-       struct fixed31_32 y1_min;
-       struct fixed31_32 y3_max;
 
        int32_t region_start, region_end;
        int32_t i;
@@ -443,7 +515,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
        PERF_TRACE();
 
-       arr_points = lut_params->arr_points;
+       corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
 
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-       arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+       corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
-       arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+       corner_points[0].green.x = corner_points[0].red.x;
+       corner_points[0].blue.x = corner_points[0].red.x;
+       corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_end));
+       corner_points[1].green.x = corner_points[1].red.x;
+       corner_points[1].blue.x = corner_points[1].red.x;
 
-       y_r = rgb_resulted[0].red;
-       y_g = rgb_resulted[0].green;
-       y_b = rgb_resulted[0].blue;
-
-       y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
-       arr_points[0].y = y1_min;
-       arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-       y_r = rgb_resulted[hw_points - 1].red;
-       y_g = rgb_resulted[hw_points - 1].green;
-       y_b = rgb_resulted[hw_points - 1].blue;
+       corner_points[0].red.y = rgb_resulted[0].red;
+       corner_points[0].green.y = rgb_resulted[0].green;
+       corner_points[0].blue.y = rgb_resulted[0].blue;
 
        /* see comment above, m_arrPoints[1].y should be the Y value for the
         * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
         */
-       y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-       arr_points[1].y = y3_max;
-
-       arr_points[1].slope = dc_fixpt_zero;
+       corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+       corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+       corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+       corner_points[1].red.slope = dc_fixpt_zero;
+       corner_points[1].green.slope = dc_fixpt_zero;
+       corner_points[1].blue.slope = dc_fixpt_zero;
 
        if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
                /* for PQ, we want to have a straight line from last HW X point,
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
                const struct fixed31_32 end_value =
                                dc_fixpt_from_int(125);
 
-               arr_points[1].slope = dc_fixpt_div(
-                       dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-                       dc_fixpt_sub(end_value, arr_points[1].x));
+               corner_points[1].red.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+                       dc_fixpt_sub(end_value, corner_points[1].red.x));
+               corner_points[1].green.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+                       dc_fixpt_sub(end_value, corner_points[1].green.x));
+               corner_points[1].blue.slope = dc_fixpt_div(
+                       dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+                       dc_fixpt_sub(end_value, corner_points[1].blue.x));
        }
 
        lut_params->hw_points_num = hw_points;
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
                ++i;
        }
        cm_helper_convert_to_custom_float(rgb_resulted,
-                                               lut_params->arr_points,
+                                               lut_params->corner_points,
                                                hw_points, false);
 
        return true;
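
For reference, the slope programmed in the PQ branch above works out to
slope = (1 - y_end) / (125 - x_end) per channel; assuming the usual 80-nit
SDR reference white, the constant 125 corresponds to 10000 nits (10000 / 80),
so the hardware extrapolates a straight line from the last LUT point up to
the PQ peak.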
index 7a531b02871f89bdfbd36a1a4279cb23fd24d775..5ae4d69391a568548cca47d9acb1a495fa61ee6c 100644 (file)
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
                struct pwl_result_data *rgb_resulted,
-               struct curve_points *arr_points,
+               struct curve_points3 *corner_points,
                uint32_t hw_points_num,
                bool fixpoint);
 
index 193184affefbebc7c555411e881195675526d3d8..87495dea45ec0a904d40123fd305935a943bf596 100644 (file)
@@ -45,6 +45,7 @@
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
 #include "dc_link_dp.h"
+#include "dccg.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
                        &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx != NULL) {
                        hubp = pipe_ctx->plane_res.hubp;
-                       if (hubp != NULL) {
+                       if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
                                if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
                                        /* one pipe underflow, we will reset all the pipes*/
                                        need_recover = true;
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
                if (pipe_ctx != NULL) {
                        hubp = pipe_ctx->plane_res.hubp;
                        /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
-                       if (hubp != NULL)
+                       if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
                                hubp->funcs->set_hubp_blank_en(hubp, true);
                }
        }
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
                if (pipe_ctx != NULL) {
                        hubp = pipe_ctx->plane_res.hubp;
                        /*DCHUBP_CNTL:HUBP_DISABLE=1*/
-                       if (hubp != NULL)
+                       if (hubp != NULL && hubp->funcs->hubp_disable_control)
                                hubp->funcs->hubp_disable_control(hubp, true);
                }
        }
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
                if (pipe_ctx != NULL) {
                        hubp = pipe_ctx->plane_res.hubp;
                        /*DCHUBP_CNTL:HUBP_DISABLE=0*/
-                       if (hubp != NULL)
+                       if (hubp != NULL && hubp->funcs->hubp_disable_control)
                                hubp->funcs->hubp_disable_control(hubp, true);
                }
        }
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
                if (pipe_ctx != NULL) {
                        hubp = pipe_ctx->plane_res.hubp;
                        /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
-                       if (hubp != NULL)
+                       if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
                                hubp->funcs->set_hubp_blank_en(hubp, true);
                }
        }
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc)
 
        enable_power_gating_plane(dc->hwseq, true);
 
-       memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+       memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
 }
 
 static void reset_hw_ctx_wrap(
@@ -1603,7 +1604,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
 }
 
 
-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
 {
        struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
        struct vm_system_aperture_param apt = { {{ 0 } } };
@@ -1703,33 +1704,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
 }
 
-
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+static void dcn10_program_output_csc(struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
                enum dc_color_space colorspace,
-               uint16_t *matrix)
+               uint16_t *matrix,
+               int opp_id)
 {
        if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
-                       if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-                               pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+               if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+                       pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
        } else {
                if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
                        pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
        }
 }
 
-static void dcn10_program_output_csc(struct dc *dc,
-               struct pipe_ctx *pipe_ctx,
-               enum dc_color_space colorspace,
-               uint16_t *matrix,
-               int opp_id)
-{
-       if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-               program_csc_matrix(pipe_ctx,
-                               colorspace,
-                               matrix);
-}
-
-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
        if (pipe_ctx->plane_state->visible)
                return true;
@@ -1738,7 +1728,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
        return false;
 }
 
-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
        if (pipe_ctx->plane_state->visible)
                return true;
@@ -1747,7 +1737,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
        return false;
 }
 
-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
        if (pipe_ctx->plane_state->visible)
                return true;
@@ -1943,10 +1933,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-
-
-       /* TODO: proper fix once fpga works */
-
        if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
                dcn10_get_hdr_visual_confirm_color(
                                pipe_ctx, &blnd_cfg.black_color);
@@ -2026,8 +2012,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
        bool per_pixel_alpha =
                        pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
 
-       /* TODO: proper fix once fpga works */
-
        pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
        pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
        /* scaler configuration */
@@ -2035,7 +2019,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
 }
 
-static void update_dchubp_dpp(
+void update_dchubp_dpp(
        struct dc *dc,
        struct pipe_ctx *pipe_ctx,
        struct dc_state *context)
@@ -2052,16 +2036,22 @@ static void update_dchubp_dpp(
         */
        if (plane_state->update_flags.bits.full_update) {
                bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
-                               dc->res_pool->dccg->clks.dispclk_khz / 2;
+                               dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
 
                dpp->funcs->dpp_dppclk_control(
                                dpp,
                                should_divided_by_2,
                                true);
 
-               dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
-                                               dc->res_pool->dccg->clks.dispclk_khz / 2 :
-                                                       dc->res_pool->dccg->clks.dispclk_khz;
+               if (dc->res_pool->dccg)
+                       dc->res_pool->dccg->funcs->update_dpp_dto(
+                                       dc->res_pool->dccg,
+                                       dpp->inst,
+                                       pipe_ctx->plane_res.bw.calc.dppclk_khz);
+               else
+                       dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+                                               dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+                                                       dc->res_pool->clk_mgr->clks.dispclk_khz;
        }
 
        /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2182,7 +2172,7 @@ static void dcn10_blank_pixel_data(
        }
 }
 
-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 {
        struct fixed31_32 multiplier = dc_fixpt_from_fraction(
                        pipe_ctx->plane_state->sdr_white_level, 80);
@@ -2257,47 +2247,7 @@ static void program_all_pipe_in_tree(
        }
 }
 
-static void dcn10_pplib_apply_display_requirements(
-       struct dc *dc,
-       struct dc_state *context)
-{
-       struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-       pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-       pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
-       pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-       pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-       pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-       pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-       dce110_fill_display_configs(context, pp_display_cfg);
-
-       if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-                       struct dm_pp_display_configuration)) !=  0)
-               dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-       dc->prev_display_config = *pp_display_cfg;
-}
-
-static void optimize_shared_resources(struct dc *dc)
-{
-       if (dc->current_state->stream_count == 0) {
-               /* S0i2 message */
-               dcn10_pplib_apply_display_requirements(dc, dc->current_state);
-       }
-
-       if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
-               dcn_bw_notify_pplib_of_wm_ranges(dc);
-}
-
-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
-{
-       /* S0i2 message */
-       if (dc->current_state->stream_count == 0 &&
-                       context->stream_count != 0)
-               dcn10_pplib_apply_display_requirements(dc, context);
-}
-
-static struct pipe_ctx *find_top_pipe_for_stream(
+struct pipe_ctx *find_top_pipe_for_stream(
                struct dc *dc,
                struct dc_state *context,
                const struct dc_stream_state *stream)
@@ -2398,10 +2348,9 @@ static void dcn10_apply_ctx_for_surface(
                hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
 }
 
-static void dcn10_set_bandwidth(
+static void dcn10_prepare_bandwidth(
                struct dc *dc,
-               struct dc_state *context,
-               bool safe_to_lower)
+               struct dc_state *context)
 {
        if (dc->debug.sanity_checks)
                dcn10_verify_allow_pstate_change_high(dc);
@@ -2410,12 +2359,39 @@ static void dcn10_set_bandwidth(
                if (context->stream_count == 0)
                        context->bw.dcn.clk.phyclk_khz = 0;
 
-               dc->res_pool->dccg->funcs->update_clocks(
-                               dc->res_pool->dccg,
-                               &context->bw.dcn.clk,
-                               safe_to_lower);
+               dc->res_pool->clk_mgr->funcs->update_clocks(
+                               dc->res_pool->clk_mgr,
+                               context,
+                               false);
+       }
 
-               dcn10_pplib_apply_display_requirements(dc, context);
+       hubbub1_program_watermarks(dc->res_pool->hubbub,
+                       &context->bw.dcn.watermarks,
+                       dc->res_pool->ref_clock_inKhz / 1000,
+                       true);
+
+       if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+               dcn_bw_notify_pplib_of_wm_ranges(dc);
+
+       if (dc->debug.sanity_checks)
+               dcn10_verify_allow_pstate_change_high(dc);
+}
+
+static void dcn10_optimize_bandwidth(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       if (dc->debug.sanity_checks)
+               dcn10_verify_allow_pstate_change_high(dc);
+
+       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (context->stream_count == 0)
+                       context->bw.dcn.clk.phyclk_khz = 0;
+
+               dc->res_pool->clk_mgr->funcs->update_clocks(
+                               dc->res_pool->clk_mgr,
+                               context,
+                               true);
        }
 
        hubbub1_program_watermarks(dc->res_pool->hubbub,
@@ -2423,6 +2399,9 @@ static void dcn10_set_bandwidth(
                        dc->res_pool->ref_clock_inKhz / 1000,
                        true);
 
+       if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+               dcn_bw_notify_pplib_of_wm_ranges(dc);
+
        if (dc->debug.sanity_checks)
                dcn10_verify_allow_pstate_change_high(dc);
 }
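
A minimal sketch of how the two new hooks are intended to be driven; the
caller below is hypothetical and not part of this patch:

	/* before programming a new state: clocks may only be raised */
	dc->hwss.prepare_bandwidth(dc, context);

	program_timings_and_planes(dc, context);	/* hypothetical step */

	/* after the state is committed: now safe to lower clocks */
	dc->hwss.optimize_bandwidth(dc, context);

The old set_bandwidth(dc, context, safe_to_lower) collapses into these two
calls, with update_clocks invoked with safe_to_lower = false and true
respectively, and watermark programming shared by both paths.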
@@ -2694,7 +2673,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
 
 static const struct hw_sequencer_funcs dcn10_funcs = {
        .program_gamut_remap = program_gamut_remap,
-       .program_csc_matrix = program_csc_matrix,
        .init_hw = dcn10_init_hw,
        .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
        .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
@@ -2721,7 +2699,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_plane = dcn10_disable_plane,
        .blank_pixel_data = dcn10_blank_pixel_data,
        .pipe_control_lock = dcn10_pipe_control_lock,
-       .set_bandwidth = dcn10_set_bandwidth,
+       .prepare_bandwidth = dcn10_prepare_bandwidth,
+       .optimize_bandwidth = dcn10_optimize_bandwidth,
        .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
        .enable_stream_timing = dcn10_enable_stream_timing,
        .set_drr = set_drr,
@@ -2732,10 +2711,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .log_hw_state = dcn10_log_hw_state,
        .get_hw_state = dcn10_get_hw_state,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-       .ready_shared_resources = ready_shared_resources,
-       .optimize_shared_resources = optimize_shared_resources,
-       .pplib_apply_display_requirements =
-                       dcn10_pplib_apply_display_requirements,
        .edp_backlight_control = hwss_edp_backlight_control,
        .edp_power_control = hwss_edp_power_control,
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
index 84d461e0ed3e2523e8bcf3238547dbd43fe5e13e..5e5610c9e600dd0b63e6351d2b56f5dfc7baacd9 100644 (file)
@@ -51,4 +51,24 @@ void dcn10_get_hw_state(
                char *pBuf, unsigned int bufSize,
                unsigned int mask);
 
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
+
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+
+void update_dchubp_dpp(
+       struct dc *dc,
+       struct pipe_ctx *pipe_ctx,
+       struct dc_state *context);
+
+struct pipe_ctx *find_top_pipe_for_stream(
+               struct dc *dc,
+               struct dc_state *context,
+               const struct dc_stream_state *stream);
+
 #endif /* __DC_HWSS_DCN10_H__ */
index ba6a8686062f27e235799e44eaf95d4d03d39778..477ab922221626dd47026d1874383418d0542cc9 100644 (file)
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
                return false;
 
        /* DCE11 HW does not support 420 */
-       if (!enc10->base.features.ycbcr420_supported &&
+       if (!enc10->base.features.hdmi_ycbcr420_supported &&
                        crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                return false;
 
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
        const struct dcn10_link_encoder *enc10,
        const struct dc_crtc_timing *crtc_timing)
 {
-       if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-               return false;
+       if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+               if (!enc10->base.features.dp_ycbcr420_supported)
+                       return false;
+       }
 
        return true;
 }
index 54626682bab23bed0a299c06d3a51b34f868c3e0..7d1f66797cb3b0e19ad876c0f6e7397e4d95aee9 100644 (file)
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc)
        REG_SET(OTG_STEREO_CONTROL, 0,
                OTG_STEREO_EN, 0);
 
-       REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+       REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
                OTG_3D_STRUCTURE_EN, 0,
-               OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
                OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
@@ -274,10 +273,12 @@ void optc1_program_timing(
         * program the reg for the interrupt position.
         */
        vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-       if (vertical_line_start < 0) {
-               ASSERT(0);
-               vertical_line_start = 0;
-       }
+       v_fp2 = 0;
+       if (vertical_line_start < 0) {
+               v_fp2 = -vertical_line_start;
+               vertical_line_start = 0;
+       }
+
        REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
                        OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
 
@@ -296,9 +297,6 @@ void optc1_program_timing(
                if (patched_crtc_timing.flags.INTERLACE == 1)
                        field_num = 1;
        }
-       v_fp2 = 0;
-       if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
-               v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
 
        /* Interlace */
        if (patched_crtc_timing.flags.INTERLACE == 1) {
@@ -1155,9 +1153,8 @@ static void optc1_enable_stereo(struct timing_generator *optc,
                                OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
 
                if (flags->PROGRAM_STEREO)
-                       REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+                       REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
                                OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
-                               OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
                                OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
 
        }
index a71453a15ae352df758831d00274ea2ca2636a6d..47dbe4bb294aeb7218fa2092e2e03065889182de 100644 (file)
 
 #include "resource.h"
 #include "include/irq_service_interface.h"
-#include "dcn10/dcn10_resource.h"
+#include "dcn10_resource.h"
 
-#include "dcn10/dcn10_ipp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_ipp.h"
+#include "dcn10_mpc.h"
 #include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10/dcn10_dpp.h"
+#include "dcn10_dpp.h"
 #include "dcn10_optc.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
 #include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_opp.h"
-#include "dcn10/dcn10_link_encoder.h"
-#include "dcn10/dcn10_stream_encoder.h"
-#include "dce/dce_clocks.h"
+#include "dcn10_opp.h"
+#include "dcn10_link_encoder.h"
+#include "dcn10_stream_encoder.h"
+#include "dcn10_clk_mgr.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
-#include "../virtual/virtual_stream_encoder.h"
+#include "virtual/virtual_stream_encoder.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
 #include "dcn10_hubp.h"
@@ -438,6 +438,7 @@ static const struct dcn_optc_mask tg_mask = {
 
 
 static const struct bios_registers bios_regs = {
+               NBIO_SR(BIOS_SCRATCH_0),
                NBIO_SR(BIOS_SCRATCH_3),
                NBIO_SR(BIOS_SCRATCH_6)
 };
@@ -719,7 +720,8 @@ static struct timing_generator *dcn10_timing_generator_create(
 static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
                .max_hdmi_pixel_clock = 600000,
-               .ycbcr420_supported = true,
+               .hdmi_ycbcr420_supported = true,
+               .dp_ycbcr420_supported = false,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_HBR3_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true,
@@ -949,8 +951,8 @@ static void destruct(struct dcn10_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.dccg != NULL)
-               dce_dccg_destroy(&pool->base.dccg);
+       if (pool->base.clk_mgr != NULL)
+               dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
        kfree(pool->base.pp_smu);
 }
@@ -1276,8 +1278,8 @@ static bool construct(
                }
        }
 
-       pool->base.dccg = dcn1_dccg_create(ctx);
-       if (pool->base.dccg == NULL) {
+       pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
+       if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto fail;
index f2ea8452d48fe0bb2297d991deb95246998bd6a3..beb08fd12b1d20cde10bcd44994b34348bb67f3c 100644 (file)
@@ -55,10 +55,10 @@ struct pp_smu {
 
 struct pp_smu_wm_set_range {
        unsigned int wm_inst;
-       uint32_t min_fill_clk_khz;
-       uint32_t max_fill_clk_khz;
-       uint32_t min_drain_clk_khz;
-       uint32_t max_drain_clk_khz;
+       uint32_t min_fill_clk_mhz;
+       uint32_t max_fill_clk_mhz;
+       uint32_t min_drain_clk_mhz;
+       uint32_t max_drain_clk_mhz;
 };
 
 #define MAX_WATERMARK_SETS 4
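
A usage sketch of the renamed fields; the values below are illustrative
only and do not come from this patch. Watermark ranges are now handed to
the SMU in MHz rather than kHz:

	struct pp_smu_wm_set_range range = {
		.wm_inst = 0,
		.min_fill_clk_mhz = 300,	/* was min_fill_clk_khz */
		.max_fill_clk_mhz = 1200,
		.min_drain_clk_mhz = 300,
		.max_drain_clk_mhz = 800,
	};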
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv {
         */
        unsigned int display_count;
 
-       /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+       /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
         *  FCLK will vary with DPM, but never below requested hard min
         */
-       unsigned int hard_min_fclk_khz;
+       unsigned int hard_min_fclk_mhz;
 
-       /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+       /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
         *  fixed clock at requested freq, either from FCH bypass or DFS
         */
-       unsigned int hard_min_dcefclk_khz;
+       unsigned int hard_min_dcefclk_mhz;
 
        /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
         *  when DF is in cstate, dcf clock is further divided down
@@ -103,13 +103,19 @@ struct pp_smu_funcs_rv {
        void (*set_display_count)(struct pp_smu *pp, int count);
 
        /* which SMU message?  are reader and writer WM separate SMU msg? */
+       /*
+        * PPSMC_MSG_SetDriverDramAddrHigh
+        * PPSMC_MSG_SetDriverDramAddrLow
+        * PPSMC_MSG_TransferTableDram2Smu
+        */
        void (*set_wm_ranges)(struct pp_smu *pp,
                        struct pp_smu_wm_range_sets *ranges);
 
        /* PPSMC_MSG_SetHardMinDcfclkByFreq
         * fixed clock at requested freq, either from FCH bypass or DFS
         */
-       void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+       void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
 
        /* PPSMC_MSG_SetMinDeepSleepDcfclk
         * when DF is in cstate, dcf clock is further divided down
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv {
        /* PPSMC_MSG_SetHardMinFclkByFreq
         * FCLK will vary with DPM, but never below requested hard min
         */
-       void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+       void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
 
        /* PPSMC_MSG_SetHardMinSocclkByFreq
         * Needed for DWB support
         */
-       void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+       void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
 
        /* PME w/a */
        void (*set_pme_wa_enable)(struct pp_smu *pp);
index 2b83f922ac02667239756e54b5ec30fd7ed65f15..1af8c777b3acbc15a3e7113af89a130bc9f8c134 100644 (file)
@@ -208,22 +208,20 @@ struct dm_bl_data_point {
                /* Brightness level as effective value in range 0-255,
                 * corresponding to above percentage
                 */
-               uint8_t signalLevel;
+               uint8_t signal_level;
 };
 
 /* Total size of the structure should not exceed 256 bytes */
 struct dm_acpi_atif_backlight_caps {
-
-
        uint16_t size; /* Bytes 0-1 (2 bytes) */
       uint16_t flags; /* Bytes 2-3 (2 bytes) */
-       uint8_t  errorCode; /* Byte 4 */
-       uint8_t  acLevelPercentage; /* Byte 5 */
-       uint8_t  dcLevelPercentage; /* Byte 6 */
-       uint8_t  minInputSignal; /* Byte 7 */
-       uint8_t  maxInputSignal; /* Byte 8 */
-       uint8_t  numOfDataPoints; /* Byte 9 */
-       struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+       uint8_t  error_code; /* Byte 4 */
+       uint8_t  ac_level_percentage; /* Byte 5 */
+       uint8_t  dc_level_percentage; /* Byte 6 */
+       uint8_t  min_input_signal; /* Byte 7 */
+       uint8_t  max_input_signal; /* Byte 8 */
+       uint8_t  num_data_points; /* Byte 9 */
+       struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
 };
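
Since the comment above caps the layout at 256 bytes (2 + 2 + 6 + 99 * 2 =
208 bytes as declared), a compile-time guard is a natural companion. A
minimal sketch, not part of the patch (the in-kernel idiom would be
BUILD_BUG_ON inside a function):

	_Static_assert(sizeof(struct dm_acpi_atif_backlight_caps) <= 256,
		"ATIF backlight caps must fit in a 256-byte ACPI buffer");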
 
 enum dm_acpi_display_type {
index cbafce649e3334e9e834fb0d9492a7d10ae07c46..5dd04520ceca5fa1ad0d52c74ee055087c660cb9 100644 (file)
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st {
        int use_urgent_burst_bw;
        double max_hscl_ratio;
        double max_vscl_ratio;
-       struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+       unsigned int num_states;
+       struct _vcs_dpi_voltage_scaling_st clock_limits[8];
 };
 
 struct _vcs_dpi_ip_params_st {
index 39ee8eba3c31ae886b34b96db767a292c79a6f4d..d1656c9d50dfe88d3896adbe92b00f0adddbc2b8 100644 (file)
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
 static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
 {
        struct bw_fixed res;
-       div64_u64_rem(arg1.value, arg2.value, &res.value);
+       div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
        return res;
 }
 
index c1976c175b5781afe3d5972215352dfd4d4dd686..e3ee96afa60e8b0d11c6422a4589cd0306d5461c 100644 (file)
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
 
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
 /********** DAL Core*********************/
-#include "display_clock.h"
+#include "hw/clk_mgr.h"
 #include "transform.h"
 #include "dpp.h"
 
@@ -169,6 +169,7 @@ struct resource_pool {
        unsigned int audio_count;
        struct audio_support audio_support;
 
+       struct clk_mgr *clk_mgr;
        struct dccg *dccg;
        struct irq_service *irqs;
 
@@ -287,7 +288,7 @@ struct dc_state {
        struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
-       struct dccg *dis_clk;
+       struct clk_mgr *dccg;
 
        struct kref refcount;
 };
index e688eb9b975c25cd2c0d15481872a9e73924ac95..ece954a40a8e3d2a1712925f47e4954218250c10 100644 (file)
@@ -31,8 +31,8 @@
 #define __DCN_CALCS_H__
 
 #include "bw_fixed.h"
-#include "display_clock.h"
 #include "../dml/display_mode_lib.h"
+#include "hw/clk_mgr.h"
 
 struct dc;
 struct dc_state;
index a83a484946133d00311c8f1219ba2bcf17527805..abc961c0906ea7729d49f0606dcfe6150d1a584e 100644 (file)
@@ -47,12 +47,18 @@ struct abm_funcs {
        bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
        bool (*set_abm_immediate_disable)(struct abm *abm);
        bool (*init_backlight)(struct abm *abm);
-       bool (*set_backlight_level)(struct abm *abm,
-                       unsigned int backlight_level,
+
+       /* backlight_pwm_u16_16 is unsigned 32 bit,
+        * 16 bit integer + 16 bit fractional, where 1.0 is the max backlight value.
+        */
+       bool (*set_backlight_level_pwm)(struct abm *abm,
+                       unsigned int backlight_pwm_u16_16,
                        unsigned int frame_ramp,
                        unsigned int controller_id,
                        bool use_smooth_brightness);
-       unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+
+       unsigned int (*get_current_backlight)(struct abm *abm);
+       unsigned int (*get_target_backlight)(struct abm *abm);
 };
 
 #endif
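
A minimal sketch of the new fixed-point convention; the helper below is
hypothetical and not part of the patch. set_backlight_level_pwm() takes an
unsigned 16.16 value where 0x00010000 represents 1.0, the maximum backlight:

	static unsigned int backlight_8bit_to_pwm_u16_16(unsigned int level)
	{
		/* scale 0..255 so that 255 maps to exactly 1.0 (0x10000) */
		return (level * 0x10000) / 255;
	}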
similarity index 63%
rename from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
rename to drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 689faa16c0aea791eed325c2ee37ecaf3932aa7a..23a4b18e5feee64ca0b0d7a1a2f311eae3575ab3 100644 (file)
  *
  */
 
-#ifndef __DISPLAY_CLOCK_H__
-#define __DISPLAY_CLOCK_H__
+#ifndef __DAL_CLK_MGR_H__
+#define __DAL_CLK_MGR_H__
 
 #include "dm_services_types.h"
 #include "dc.h"
 
-/* Structure containing all state-dependent clocks
- * (dependent on "enum clocks_state") */
-struct state_dependent_clocks {
-       int display_clk_khz;
-       int pixel_clk_khz;
-};
-
-struct dccg {
+struct clk_mgr {
        struct dc_context *ctx;
-       const struct display_clock_funcs *funcs;
+       const struct clk_mgr_funcs *funcs;
 
-       enum dm_pp_clocks_state max_clks_state;
-       enum dm_pp_clocks_state cur_min_clks_state;
        struct dc_clocks clks;
 };
 
-struct display_clock_funcs {
-       void (*update_clocks)(struct dccg *dccg,
-                       struct dc_clocks *new_clocks,
+struct clk_mgr_funcs {
+       void (*update_clocks)(struct clk_mgr *clk_mgr,
+                       struct dc_state *context,
                        bool safe_to_lower);
-       int (*set_dispclk)(struct dccg *dccg,
-               int requested_clock_khz);
-
-       int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
 
-       bool (*update_dfs_bypass)(struct dccg *dccg,
-               struct dc *dc,
-               struct dc_state *context,
-               int requested_clock_khz);
+       int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
 };
 
-#endif /* __DISPLAY_CLOCK_H__ */
+#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
new file mode 100644 (file)
index 0000000..95a56d0
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCCG_H__
+#define __DAL_DCCG_H__
+
+#include "dc_types.h"
+
+struct dccg {
+       struct dc_context *ctx;
+       const struct dccg_funcs *funcs;
+
+       int ref_dppclk;
+};
+
+struct dccg_funcs {
+       void (*update_dpp_dto)(struct dccg *dccg,
+                       int dpp_inst,
+                       int req_dppclk);
+};
+
+#endif //__DAL_DCCG_H__
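
A hedged sketch of a dccg_funcs implementation; register programming is
omitted and the function below is illustrative, not the real DCN hook:

	static void example_update_dpp_dto(struct dccg *dccg, int dpp_inst,
			int req_dppclk)
	{
		/* a DTO is typically a phase/modulo divider pair:
		 * dppclk = ref_dppclk * phase / modulo
		 */
		int phase = req_dppclk;
		int modulo = dccg->ref_dppclk;

		/* a real implementation would write phase/modulo to the
		 * per-instance DTO registers selected by dpp_inst
		 */
		(void)phase;
		(void)modulo;
		(void)dpp_inst;
	}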
index cf7433ebf91a07557b1f4328980ba4bdcf15ab07..da85537a448881689b8333a7d33f2d477172c21f 100644 (file)
@@ -53,6 +53,12 @@ struct curve_points {
        uint32_t custom_float_slope;
 };
 
+struct curve_points3 {
+       struct curve_points red;
+       struct curve_points green;
+       struct curve_points blue;
+};
+
 struct pwl_result_data {
        struct fixed31_32 red;
        struct fixed31_32 green;
@@ -71,9 +77,17 @@ struct pwl_result_data {
        uint32_t delta_blue_reg;
 };
 
+/* arr_curve_points - regamma regions/segments specification
+ * arr_points - beginning and end point specified separately (only one on DCE)
+ * corner_points - beginning and end point for all 3 colors (DCN)
+ * rgb_resulted - final curve
+ */
 struct pwl_params {
        struct gamma_curve arr_curve_points[34];
-       struct curve_points arr_points[2];
+       union {
+               struct curve_points arr_points[2];
+               struct curve_points3 corner_points[2];
+       };
        struct pwl_result_data rgb_resulted[256 + 3];
        uint32_t hw_points_num;
 };
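
A short illustration of what the union buys; the helper is hypothetical and
not part of the patch. DCE callers keep using the two shared arr_points,
while DCN callers address each channel through corner_points; both views
alias the same storage at the head of struct pwl_params:

	static void example_zero_end_slope(struct pwl_params *lut, bool is_dcn)
	{
		if (is_dcn) {
			/* DCN: independent end point per color channel */
			lut->corner_points[1].red.slope = dc_fixpt_zero;
			lut->corner_points[1].green.slope = dc_fixpt_zero;
			lut->corner_points[1].blue.slope = dc_fixpt_zero;
		} else {
			/* DCE: one shared end point for all channels */
			lut->arr_points[1].slope = dc_fixpt_zero;
		}
	}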
index e28e9770e0a3638cca907b213a63aceb5787588e..c20fdcaac53bb7e29e51ad7eccdbb58d0292e65a 100644 (file)
@@ -65,7 +65,8 @@ struct encoder_feature_support {
 
        enum dc_color_depth max_hdmi_deep_color;
        unsigned int max_hdmi_pixel_clock;
-       bool ycbcr420_supported;
+       bool hdmi_ycbcr420_supported;
+       bool dp_ycbcr420_supported;
 };
 
 union dpcd_psr_configuration {
index da89c2edb07c758034d640045201465833dfa245..06df02ddff6a045a9f201e78a504b57453f33717 100644 (file)
@@ -31,7 +31,7 @@
 #include "dml/display_mode_structs.h"
 
 struct dchub_init_data;
-struct cstate_pstate_watermarks_st {
+struct cstate_pstate_watermarks_st1 {
        uint32_t cstate_exit_ns;
        uint32_t cstate_enter_plus_exit_ns;
        uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
 struct dcn_watermarks {
        uint32_t pte_meta_urgent_ns;
        uint32_t urgent_ns;
-       struct cstate_pstate_watermarks_st cstate_pstate;
+       struct cstate_pstate_watermarks_st1 cstate_pstate;
 };
 
 struct dcn_watermark_set {
index 26f29d5da3d8c098ed695b17476a7bc6c9d77826..e9b702ce02ddca675fbaf4b0331dfc201209f9a3 100644 (file)
@@ -32,8 +32,6 @@
 #include "inc/hw/link_encoder.h"
 #include "core_status.h"
 
-#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
-
 enum pipe_gating_control {
        PIPE_GATING_CONTROL_DISABLE = 0,
        PIPE_GATING_CONTROL_ENABLE,
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs {
        void (*program_gamut_remap)(
                        struct pipe_ctx *pipe_ctx);
 
-       void (*program_csc_matrix)(
-                       struct pipe_ctx *pipe_ctx,
-                       enum dc_color_space colorspace,
-                       uint16_t *matrix);
-
        void (*program_output_csc)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx,
                        enum dc_color_space colorspace,
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs {
                        struct pipe_ctx *pipe_ctx,
                        bool blank);
 
-       void (*set_bandwidth)(
+       void (*prepare_bandwidth)(
                        struct dc *dc,
-                       struct dc_state *context,
-                       bool safe_to_lower);
+                       struct dc_state *context);
+       void (*optimize_bandwidth)(
+                       struct dc *dc,
+                       struct dc_state *context);
 
        void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
                        int vmin, int vmax);
@@ -210,11 +205,6 @@ struct hw_sequencer_funcs {
                        struct resource_pool *res_pool,
                        struct pipe_ctx *pipe_ctx);
 
-       void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
-       void (*optimize_shared_resources)(struct dc *dc);
-       void (*pplib_apply_display_requirements)(
-                       struct dc *dc,
-                       struct dc_state *context);
        void (*edp_power_control)(
                        struct dc_link *link,
                        bool enable);
index 33b99e3ab10d59bb63dc43cc38bd67f66005ef48..0086a2f1d21a1983d30bfd6509bdfc65ed731845 100644 (file)
@@ -30,9 +30,6 @@
 #include "dal_asic_id.h"
 #include "dm_pp_smu.h"
 
-/* TODO unhardcode, 4 for CZ*/
-#define MEMORY_TYPE_MULTIPLIER 4
-
 enum dce_version resource_parse_asic_id(
                struct hw_asic_id asic_id);
 
index cdcefd08748784b353bed088aab4ed642213b8c9..7480f072c37531995741c0c0ef0f6c53b2eaf723 100644 (file)
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space(
                        a1);
 }
 
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+{
+       struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+
+       return translate_from_linear_space(arg,
+                       dc_fixpt_zero,
+                       dc_fixpt_zero,
+                       dc_fixpt_zero,
+                       dc_fixpt_zero,
+                       gamma);
+}
+
 static struct fixed31_32 translate_to_linear_space(
        struct fixed31_32 arg,
        struct fixed31_32 a0,
@@ -709,6 +721,169 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
        }
 }
 
+static void hermite_spline_eetf(struct fixed31_32 input_x,
+                               struct fixed31_32 max_display,
+                               struct fixed31_32 min_display,
+                               struct fixed31_32 max_content,
+                               struct fixed31_32 *out_x)
+{
+       struct fixed31_32 min_lum_pq;
+       struct fixed31_32 max_lum_pq;
+       struct fixed31_32 max_content_pq;
+       struct fixed31_32 ks;
+       struct fixed31_32 E1;
+       struct fixed31_32 E2;
+       struct fixed31_32 E3;
+       struct fixed31_32 t;
+       struct fixed31_32 t2;
+       struct fixed31_32 t3;
+       struct fixed31_32 two;
+       struct fixed31_32 three;
+       struct fixed31_32 temp1;
+       struct fixed31_32 temp2;
+       struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
+       struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
+       struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
+
+       if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
+               *out_x = dc_fixpt_zero;
+               return;
+       }
+
+       compute_pq(input_x, &E1);
+       compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
+       compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
+       compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
+       a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
+       ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
+
+       if (dc_fixpt_lt(E1, ks))
+               E2 = E1;
+       else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
+               if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
+                       // t = (E1 - ks) / (1 - ks)
+                       t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
+                                       dc_fixpt_sub(dc_fixpt_one, ks));
+               else
+                       t = dc_fixpt_zero;
+
+               two = dc_fixpt_from_int(2);
+               three = dc_fixpt_from_int(3);
+
+               t2 = dc_fixpt_mul(t, t);
+               t3 = dc_fixpt_mul(t2, t);
+               temp1 = dc_fixpt_mul(two, t3);
+               temp2 = dc_fixpt_mul(three, t2);
+
+               // (2t^3 - 3t^2 + 1) * ks
+               E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
+                               dc_fixpt_sub(temp1, temp2)));
+
+               // (-2t^3 + 3t^2) * max_lum_pq
+               E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
+                               dc_fixpt_sub(temp2, temp1)));
+
+               temp1 = dc_fixpt_mul(two, t2);
+               temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
+
+               // (t^3 - 2t^2 + t) * (1-ks)
+               E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+                               dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+       } else
+               E2 = dc_fixpt_one;
+
+       temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+       temp2 = dc_fixpt_mul(temp1, temp1);
+       temp2 = dc_fixpt_mul(temp2, temp2);
+       // temp2 = (1-E2)^4
+
+       E3 =  dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
+       compute_de_pq(E3, out_x);
+
+       *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
+}
+
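Consolidating the inline comments above into one expression: for
ks <= E1 <= 1 with t = (E1 - ks) / (1 - ks), the spline computes
E2 = (2t^3 - 3t^2 + 1) * ks + (-2t^3 + 3t^2) * max_lum_pq
	+ (t^3 - 2t^2 + t) * (1 - ks),
followed by the black-level lift E3 = E2 + min_lum_pq * (1 - E2)^4. This
matches the hermite-basis EETF described in ITU-R BT.2390, with
ks = a * max_lum_pq - b, a = (1 + b) / max_content_pq and b = 0.5; that
reading is inferred from the code, not stated by the patch itself.
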
+static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
+               uint32_t hw_points_num,
+               const struct hw_x_point *coordinate_x,
+               const struct freesync_hdr_tf_params *fs_params)
+{
+       uint32_t i;
+       struct pwl_float_data_ex *rgb = rgb_regamma;
+       const struct hw_x_point *coord_x = coordinate_x;
+       struct fixed31_32 scaledX = dc_fixpt_zero;
+       struct fixed31_32 scaledX1 = dc_fixpt_zero;
+       struct fixed31_32 max_display;
+       struct fixed31_32 min_display;
+       struct fixed31_32 max_content;
+       struct fixed31_32 min_content;
+       struct fixed31_32 clip = dc_fixpt_one;
+       struct fixed31_32 output;
+       bool use_eetf = false;
+       bool is_clipped = false;
+       struct fixed31_32 sdr_white_level;
+
+       /* validate fs_params before dereferencing it */
+       if (fs_params == NULL || fs_params->max_content == 0 ||
+                       fs_params->max_display == 0)
+               return false;
+
+       max_display = dc_fixpt_from_int(fs_params->max_display);
+       min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+       max_content = dc_fixpt_from_int(fs_params->max_content);
+       min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+       sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
+       if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
+               min_display = dc_fixpt_from_fraction(1, 10);
+       if (fs_params->max_display < 100) // cap at 100 at the top
+               max_display = dc_fixpt_from_int(100);
+
+       if (fs_params->min_content < fs_params->min_display)
+               use_eetf = true;
+       else
+               min_content = min_display;
+
+       if (fs_params->max_content > fs_params->max_display)
+               use_eetf = true;
+       else
+               max_content = max_display;
+
+       rgb += 32; // skip the first 32 points: values too small for fixed-point precision
+       coord_x += 32;
+       for (i = 32; i <= hw_points_num; i++) {
+               if (!is_clipped) {
+                       if (use_eetf) {
+                               /* max content is equal to 1 */
+                               scaledX1 = dc_fixpt_div(coord_x->x,
+                                               dc_fixpt_div(max_content, sdr_white_level));
+                               hermite_spline_eetf(scaledX1, max_display, min_display,
+                                               max_content, &scaledX);
+                       } else
+                               scaledX = dc_fixpt_div(coord_x->x,
+                                               dc_fixpt_div(max_display, sdr_white_level));
+
+                       if (dc_fixpt_lt(scaledX, clip)) {
+                               if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
+                                       output = dc_fixpt_zero;
+                               else
+                                       output = calculate_gamma22(scaledX);
+
+                               rgb->r = output;
+                               rgb->g = output;
+                               rgb->b = output;
+                       } else {
+                               is_clipped = true;
+                               rgb->r = clip;
+                               rgb->g = clip;
+                               rgb->b = clip;
+                       }
+               } else {
+                       rgb->r = clip;
+                       rgb->g = clip;
+                       rgb->b = clip;
+               }
+
+               ++coord_x;
+               ++rgb;
+       }
+
+       return true;
+}
+
 static void build_degamma(struct pwl_float_data_ex *curve,
                uint32_t hw_points_num,
                const struct hw_x_point *coordinate_x, bool is_2_4)
@@ -1356,7 +1531,8 @@ static bool map_regamma_hw_to_x_user(
 #define _EXTRA_POINTS 3
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-               const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+               const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+               const struct freesync_hdr_tf_params *fs_params)
 {
        struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
        struct dividers dividers;
@@ -1374,7 +1550,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
        /* we can use hardcoded curve for plain SRGB TF */
        if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
                        output_tf->tf == TRANSFER_FUNCTION_SRGB &&
-                       (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+                       (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)))
                return true;
 
        output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1424,6 +1600,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                                MAX_HW_POINTS,
                                coordinates_x,
                                output_tf->sdr_ref_white_level);
+       } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
+                       fs_params != NULL) {
+               build_freesync_hdr(rgb_regamma,
+                               MAX_HW_POINTS,
+                               coordinates_x,
+                               fs_params);
        } else {
                tf_pts->end_exponent = 0;
                tf_pts->x_point_at_y1_red = 1;
index 63ccb9c91224796fbce9e78ab2b241f28c64514d..a6e164df090a669542cb195ee43552cffab2c23d 100644 (file)
@@ -73,12 +73,21 @@ struct regamma_lut {
        };
 };
 
+struct freesync_hdr_tf_params {
+       unsigned int sdr_white_level;
+       unsigned int min_content; // luminance in 1/10000 nits
+       unsigned int max_content; // luminance in nits
+       unsigned int min_display; // luminance in 1/10000 nits
+       unsigned int max_display; // luminance in nits
+};
+
 void setup_x_points_distribution(void);
 void precompute_pq(void);
 void precompute_de_pq(void);
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-               const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+               const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+               const struct freesync_hdr_tf_params *fs_params);
 
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp);
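
A hedged example of the new entry point; the values and the caller context
below are illustrative only:

	struct freesync_hdr_tf_params fs = {
		.sdr_white_level = 80,	/* nits */
		.min_content = 50,	/* 0.005 nits (1/10000 nit units) */
		.max_content = 1000,	/* nits */
		.min_display = 500,	/* 0.05 nits (1/10000 nit units) */
		.max_display = 600,	/* nits */
	};

	mod_color_calculate_regamma_params(stream->out_transfer_func,
			gamma, true, false, &fs);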
index 4018c7180d007a8a83ee549a99d6f3f02b07cb7f..620a171620ee8dbb4bf77eedb3da5eb8937973ff 100644 (file)
@@ -37,6 +37,8 @@
 #define RENDER_TIMES_MAX_COUNT 10
 /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
 #define BTR_EXIT_MARGIN 2000
+/* Threshold to exit fixed refresh rate */
+#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
 /* Number of consecutive frames to check before entering/exiting fixed refresh */
 #define FIXED_REFRESH_ENTER_FRAME_COUNT 5
 #define FIXED_REFRESH_EXIT_FRAME_COUNT 5
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
                if (in_out_vrr->btr.btr_active) {
                        in_out_vrr->btr.frame_counter = 0;
                        in_out_vrr->btr.btr_active = false;
-
-               /* Exit Fixed Refresh mode */
-               } else if (in_out_vrr->fixed.fixed_active) {
-
-                       in_out_vrr->fixed.frame_counter++;
-
-                       if (in_out_vrr->fixed.frame_counter >
-                                       FIXED_REFRESH_EXIT_FRAME_COUNT) {
-                               in_out_vrr->fixed.frame_counter = 0;
-                               in_out_vrr->fixed.fixed_active = false;
-                       }
                }
        } else if (last_render_time_in_us > max_render_time_in_us) {
                /* Enter Below the Range */
-               if (!in_out_vrr->btr.btr_active &&
-                               in_out_vrr->btr.btr_enabled) {
-                       in_out_vrr->btr.btr_active = true;
-
-               /* Enter Fixed Refresh mode */
-               } else if (!in_out_vrr->fixed.fixed_active &&
-                               !in_out_vrr->btr.btr_enabled) {
-                       in_out_vrr->fixed.frame_counter++;
-
-                       if (in_out_vrr->fixed.frame_counter >
-                                       FIXED_REFRESH_ENTER_FRAME_COUNT) {
-                               in_out_vrr->fixed.frame_counter = 0;
-                               in_out_vrr->fixed.fixed_active = true;
-                       }
-               }
+               in_out_vrr->btr.btr_active = true;
        }
 
        /* BTR set to "not active" so disengage */
        if (!in_out_vrr->btr.btr_active) {
-               in_out_vrr->btr.btr_active = false;
                in_out_vrr->btr.inserted_duration_in_us = 0;
                in_out_vrr->btr.frames_to_insert = 0;
                in_out_vrr->btr.frame_counter = 0;
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
        bool update = false;
        unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 
-       if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+       // Compute the exit refresh rate and exit frame duration
+       unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+                       + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
+       unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
+
+       if (last_render_time_in_us < exit_frame_duration_in_us) {
                /* Exit Fixed Refresh mode */
                if (in_out_vrr->fixed.fixed_active) {
                        in_out_vrr->fixed.frame_counter++;
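
Worked example with illustrative numbers, not taken from the patch: for
max_render_time_in_us = 25000 (a 40 Hz minimum refresh rate), the exit rate
is 1000000000 / 25000 + 1000 * 4 = 44000 milli-Hz, so
exit_frame_duration_in_us = 1000000000 / 44000 = 22727; fixed refresh is now
left once frames render faster than about 22.7 ms, rather than using the
fixed BTR_EXIT_MARGIN of 2000 us.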
index 2083c308007cde72412f7ad7291dc85fe882e532..470d7b89071a40163dc039af84a4bc345852cfcf 100644 (file)
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
        PP_AVFS_MASK = 0x40000,
 };
 
+enum DC_FEATURE_MASK {
+       DC_FBC_MASK = 0x1,
+};
+
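A minimal usage sketch, assuming a module-level feature-mask parameter such
as amdgpu_dc_feature_mask (the wiring below is hypothetical):

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;	/* hypothetical flag */
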
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
  */
index d2e7c0fa96c2f7263f367cf67597993aa9ebcbf5..8eb0bb241210bdffe3ff4f3e280bed4856a1c810 100644 (file)
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
   struct   atom_common_table_header  table_header;
   uint8_t  smuip_min_ver;
   uint8_t  smuip_max_ver;
-  uint8_t  smu_rsd1;
+  uint8_t  waflclk_ss_mode;
   uint8_t  gpuclk_ss_mode;
   uint16_t sclk_ss_percentage;
   uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
   uint32_t syspll3_1_vco_freq_10khz;
   uint32_t bootup_fclk_10khz;
   uint32_t bootup_waflclk_10khz;
-  uint32_t reserved[3];
+  uint32_t smu_info_caps;
+  uint16_t waflclk_ss_percentage;    // in unit of 0.001%
+  uint16_t smuinitoffset;
+  uint32_t reserved;
 };
 
 /*
index 64ecffd5212663c4d1ba9c90181f7df320289db8..58ac0b90c310e721ea68ba2eae2b04473cf56e90 100644 (file)
@@ -205,20 +205,6 @@ struct tile_config {
 /**
  * struct kfd2kgd_calls
  *
- * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
- * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
- *
- * @get_local_mem_info: Retrieves information about GPU local memory
- *
- * @get_gpu_clock_counter: Retrieves GPU clock counter
- *
- * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
- *
- * @alloc_pasid: Allocate a PASID
- * @free_pasid: Free a PASID
- *
  * @program_sh_mem_settings: A function that should initialize the memory
  * properties such as main aperture memory type (cached / non-cached) and
  * secondary aperture base address, size and memory type.
@@ -255,64 +241,16 @@ struct tile_config {
  *
  * @get_tile_config: Returns GPU-specific tiling mode information
  *
- * @get_cu_info: Retrieves activated cu info
- *
- * @get_vram_usage: Returns current VRAM usage
- *
- * @create_process_vm: Create a VM address space for a given process and GPU
- *
- * @destroy_process_vm: Destroy a VM
- *
- * @get_process_page_dir: Get physical address of a VM page directory
- *
  * @set_vm_context_page_table_base: Program page table base for a VMID
  *
- * @alloc_memory_of_gpu: Allocate GPUVM memory
- *
- * @free_memory_of_gpu: Free GPUVM memory
- *
- * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
- * space. Allocates and updates page tables and page directories as
- * needed. This function may return before all page table updates have
- * completed. This allows multiple map operations (on multiple GPUs)
- * to happen concurrently. Use sync_memory to synchronize with all
- * pending updates.
- *
- * @unmap_memor_to_gpu: Unmap GPUVM memory from a specific VM address space
- *
- * @sync_memory: Wait for pending page table updates to complete
- *
- * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
- * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
- * The kernel virtual address remains valid until the BO is freed.
- *
- * @restore_process_bos: Restore all BOs that belong to the
- * process. This is intended for restoring memory mappings after a TTM
- * eviction.
- *
  * @invalidate_tlbs: Invalidate TLBs for a specific PASID
  *
  * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
  *
- * @submit_ib: Submits an IB to the engine specified by inserting the
- * IB to the corresponding ring (ring type). The IB is executed with the
- * specified VMID in a user mode context.
- *
- * @get_vm_fault_info: Return information about a recent VM fault on
- * GFXv7 and v8. If multiple VM faults occurred since the last call of
- * this function, it will return information about the first of those
- * faults. On GFXv9 VM fault information is fully contained in the IH
- * packet and this function is not needed.
- *
  * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
- * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
- *
- * @set_compute_idle: Indicates that compute is idle on a device. This
- * can be used to change power profiles depending on compute activity.
- *
  * @get_hive_id: Returns the hive id of the current device, 0 if XGMI is not enabled
  *
  * This structure contains function pointers to services that the kgd driver
@@ -320,21 +258,6 @@ struct tile_config {
  *
  */
 struct kfd2kgd_calls {
-       int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
-                                       void **mem_obj, uint64_t *gpu_addr,
-                                       void **cpu_ptr, bool mqd_gfx9);
-
-       void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
-
-       void (*get_local_mem_info)(struct kgd_dev *kgd,
-                       struct kfd_local_mem_info *mem_info);
-       uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
-
-       uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
-
-       int (*alloc_pasid)(unsigned int bits);
-       void (*free_pasid)(unsigned int pasid);
-
        /* Register access functions */
        void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
                        uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
@@ -398,49 +321,11 @@ struct kfd2kgd_calls {
                                uint64_t va, uint32_t vmid);
        int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
 
-       void (*get_cu_info)(struct kgd_dev *kgd,
-                       struct kfd_cu_info *cu_info);
-       uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
-
-       int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
-                       void **process_info, struct dma_fence **ef);
-       int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
-                       unsigned int pasid, void **vm, void **process_info,
-                       struct dma_fence **ef);
-       void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
-       void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
-       uint64_t (*get_process_page_dir)(void *vm);
        void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base);
-       int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
-                       uint64_t size, void *vm,
-                       struct kgd_mem **mem, uint64_t *offset,
-                       uint32_t flags);
-       int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
-       int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-                       void *vm);
-       int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-                       void *vm);
-       int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-       int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
-                       void **kptr, uint64_t *size);
-       int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
-
        int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
        int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
-
-       int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
-                       uint32_t vmid, uint64_t gpu_addr,
-                       uint32_t *ib_cmd, uint32_t ib_len);
-
-       int (*get_vm_fault_info)(struct kgd_dev *kgd,
-                       struct kfd_vm_fault_info *info);
        uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-
-       void (*gpu_recover)(struct kgd_dev *kgd);
-
-       void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
-
        uint64_t (*get_hive_id)(struct kgd_dev *kgd);
 
 };
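
The removed @map_memory_to_gpu documentation above describes an asynchronous contract: a map call may return before its page-table updates have landed, which lets maps on multiple GPUs overlap, with sync_memory acting as the fence. A minimal sketch of that calling pattern using the removed hooks (hypothetical variable names, error handling omitted):

	/* Map one allocation into two GPUs' VMs; the calls may overlap. */
	kfd2kgd->map_memory_to_gpu(kgd0, mem, vm0);
	kfd2kgd->map_memory_to_gpu(kgd1, mem, vm1);

	/* Wait (interruptibly) for all pending page-table updates. */
	kfd2kgd->sync_memory(kgd0, mem, true);
	kfd2kgd->sync_memory(kgd1, mem, true);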
index 5e19f5977eb19e551eecad7f72f9966bfbe8c768..d138ddae563d2cfdf6ff9c878a42e971612509fb 100644 (file)
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
            PP_CAP(PHM_PlatformCaps_TDRamping) ||
            PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
                mutex_lock(&adev->grbm_idx_mutex);
                value = 0;
                value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
                                        "Failed to enable DPM DIDT.", goto error);
                }
                mutex_unlock(&adev->grbm_idx_mutex);
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
 error:
        mutex_unlock(&adev->grbm_idx_mutex);
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return result;
 }
 
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
            PP_CAP(PHM_PlatformCaps_TDRamping) ||
            PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                result = smu7_enable_didt(hwmgr, false);
                PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to disable DPM DIDT.", goto error);
                }
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
 error:
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return result;
 }
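
The hunks above (and the vega10 ones below) replace open-coded calls through the RLC function table with the amdgpu_gfx_rlc_enter/exit_safe_mode() wrappers. A hedged sketch of what such a wrapper plausibly centralizes; the field and hook names here are assumptions, not taken from this patch:

	/* Hypothetical sketch: one place for the NULL checks and the
	 * in-safe-mode bookkeeping every caller used to repeat. */
	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		if (adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->set_safe_mode) {
			adev->gfx.rlc.funcs->set_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = true;
		}
	}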
 
index 99a33c33a32c9e47fb8bbaf455d8f44efe69c18f..101c09b212ade5690299c823f27aea1ae65cae4a 100644 (file)
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
                table->WatermarkRow[1][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
        }
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
                table->WatermarkRow[0][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
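
The reordering above matters because the cast binds before the division: in the old code (uint16_t)(x) / 1000 truncated the kHz value to 16 bits first, corrupting any clock above 65535 kHz. A standalone demonstration (not part of the patch):

	uint32_t khz = 800000;                     /* 800 MHz in kHz */

	/* Old order: truncate to 16 bits, then divide. */
	uint16_t wrong = (uint16_t)(khz) / 1000;   /* (800000 & 0xffff) / 1000 == 13 */

	/* New order: divide, then truncate. */
	uint16_t right = (uint16_t)(khz / 1000);   /* 800000 / 1000 == 800 */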
index 2d88abf97e7b4a69f21581af8bf2a5dda8b3ce1f..6f26cb241ecce5ddbef2c37b248ce3b3b0c59435 100644 (file)
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
        if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
                data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
 
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
                data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        int result;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
index 57143d51e3eed6b1dce0011748345cfece4c7165..f2daf00cc9119e15c61b7e084aeeda1b4895e4ef 100644 (file)
@@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.disable_auto_wattman = 1;
        data->registry_data.auto_wattman_debug = 0;
        data->registry_data.auto_wattman_sample_period = 100;
+       data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
        data->registry_data.auto_wattman_threshold = 50;
        data->registry_data.gfxoff_controlled_by_driver = 1;
        data->gfxoff_allowed = false;
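
The new default added above looks like a raw IEEE-754 bit pattern rather than an integer: 0x3F6CCCCD is the single-precision encoding of approximately 0.925 (sign 0, exponent 126, i.e. 2^-1, mantissa 1.85). A sketch of the decoding, assuming the SMC interprets the registry value as a float (an assumption, not confirmed by this patch):

	uint32_t bits = 0x3F6CCCCD;
	float ratio;

	/* Reinterpret the raw 32-bit registry value as an IEEE-754 float. */
	memcpy(&ratio, &bits, sizeof(ratio));   /* ratio ~= 0.925 */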
@@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+       struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+       if (data->smu_features[GNLD_DPM_UCLK].enabled)
+               return smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_SetUclkFastSwitch,
+                       1);
+
+       return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+       struct vega20_hwmgr *data =
+                       (struct vega20_hwmgr *)(hwmgr->backend);
+
+       return smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_SetFclkGfxClkRatio,
+                       data->registry_data.fclk_gfxclk_ratio);
+}
+
 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
        struct vega20_hwmgr *data =
@@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        "[EnableDPMTasks] Failed to enable all smu features!",
                        return result);
 
+       result = vega20_notify_smc_display_change(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "[EnableDPMTasks] Failed to notify smc display change!",
+                       return result);
+
+       result = vega20_send_clock_ratio(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "[EnableDPMTasks] Failed to send clock ratio!",
+                       return result);
+
        /* Initialize UVD/VCE powergating state */
        vega20_init_powergate_state(hwmgr);
 
@@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        return ret;
 }
 
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
-               bool has_disp)
-{
-       struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
-       if (data->smu_features[GNLD_DPM_UCLK].enabled)
-               return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
-
-       return 0;
-}
-
 int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                struct pp_display_clock_request *clock_req)
 {
@@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
        struct pp_display_clock_request clock_req;
        int ret = 0;
 
-       if ((hwmgr->display_config->num_display > 1) &&
-            !hwmgr->display_config->multi_monitor_in_sync &&
-            !hwmgr->display_config->nb_pstate_switch_disable)
-               vega20_notify_smc_display_change(hwmgr, false);
-       else
-               vega20_notify_smc_display_change(hwmgr, true);
-
        min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
        min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
@@ -2742,7 +2755,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
-                               (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+                               (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
 
        case PP_MCLK:
@@ -2759,7 +2772,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
-                               (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+                               (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
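
Both comparison fixes above imply a unit mismatch: clocks_in_khz is, as named, in kHz, while the firmware reports now in 10 kHz units. For example, a current clock of 1350 MHz arrives as now == 135000, and only 135000 * 10 == 1350000 matches the kHz table entry, so the "*" marker lands on the right level again.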
 
        case PP_PCIE:
@@ -3441,109 +3454,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
 
 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
        /* init/fini related */
-       .backend_init =
-               vega20_hwmgr_backend_init,
-       .backend_fini =
-               vega20_hwmgr_backend_fini,
-       .asic_setup =
-               vega20_setup_asic_task,
-       .power_off_asic =
-               vega20_power_off_asic,
-       .dynamic_state_management_enable =
-               vega20_enable_dpm_tasks,
-       .dynamic_state_management_disable =
-               vega20_disable_dpm_tasks,
+       .backend_init = vega20_hwmgr_backend_init,
+       .backend_fini = vega20_hwmgr_backend_fini,
+       .asic_setup = vega20_setup_asic_task,
+       .power_off_asic = vega20_power_off_asic,
+       .dynamic_state_management_enable = vega20_enable_dpm_tasks,
+       .dynamic_state_management_disable = vega20_disable_dpm_tasks,
        /* power state related */
-       .apply_clocks_adjust_rules =
-               vega20_apply_clocks_adjust_rules,
-       .pre_display_config_changed =
-               vega20_pre_display_configuration_changed_task,
-       .display_config_changed =
-               vega20_display_configuration_changed_task,
+       .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+       .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
+       .display_config_changed = vega20_display_configuration_changed_task,
        .check_smc_update_required_for_display_configuration =
                vega20_check_smc_update_required_for_display_configuration,
        .notify_smc_display_config_after_ps_adjustment =
                vega20_notify_smc_display_config_after_ps_adjustment,
        /* export to DAL */
-       .get_sclk =
-               vega20_dpm_get_sclk,
-       .get_mclk =
-               vega20_dpm_get_mclk,
-       .get_dal_power_level =
-               vega20_get_dal_power_level,
-       .get_clock_by_type_with_latency =
-               vega20_get_clock_by_type_with_latency,
-       .get_clock_by_type_with_voltage =
-               vega20_get_clock_by_type_with_voltage,
-       .set_watermarks_for_clocks_ranges =
-               vega20_set_watermarks_for_clocks_ranges,
-       .display_clock_voltage_request =
-               vega20_display_clock_voltage_request,
-       .get_performance_level =
-               vega20_get_performance_level,
+       .get_sclk = vega20_dpm_get_sclk,
+       .get_mclk = vega20_dpm_get_mclk,
+       .get_dal_power_level = vega20_get_dal_power_level,
+       .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+       .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
+       .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
+       .display_clock_voltage_request = vega20_display_clock_voltage_request,
+       .get_performance_level = vega20_get_performance_level,
        /* UMD pstate, profile related */
-       .force_dpm_level =
-               vega20_dpm_force_dpm_level,
-       .get_power_profile_mode =
-               vega20_get_power_profile_mode,
-       .set_power_profile_mode =
-               vega20_set_power_profile_mode,
+       .force_dpm_level = vega20_dpm_force_dpm_level,
+       .get_power_profile_mode = vega20_get_power_profile_mode,
+       .set_power_profile_mode = vega20_set_power_profile_mode,
        /* od related */
-       .set_power_limit =
-               vega20_set_power_limit,
-       .get_sclk_od =
-               vega20_get_sclk_od,
-       .set_sclk_od =
-               vega20_set_sclk_od,
-       .get_mclk_od =
-               vega20_get_mclk_od,
-       .set_mclk_od =
-               vega20_set_mclk_od,
-       .odn_edit_dpm_table =
-               vega20_odn_edit_dpm_table,
+       .set_power_limit = vega20_set_power_limit,
+       .get_sclk_od = vega20_get_sclk_od,
+       .set_sclk_od = vega20_set_sclk_od,
+       .get_mclk_od = vega20_get_mclk_od,
+       .set_mclk_od = vega20_set_mclk_od,
+       .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
        /* for sysfs to retrieve/set gfxclk/memclk */
-       .force_clock_level =
-               vega20_force_clock_level,
-       .print_clock_levels =
-               vega20_print_clock_levels,
-       .read_sensor =
-               vega20_read_sensor,
+       .force_clock_level = vega20_force_clock_level,
+       .print_clock_levels = vega20_print_clock_levels,
+       .read_sensor = vega20_read_sensor,
        /* powergate related */
-       .powergate_uvd =
-               vega20_power_gate_uvd,
-       .powergate_vce =
-               vega20_power_gate_vce,
+       .powergate_uvd = vega20_power_gate_uvd,
+       .powergate_vce = vega20_power_gate_vce,
        /* thermal related */
-       .start_thermal_controller =
-               vega20_start_thermal_controller,
-       .stop_thermal_controller =
-               vega20_thermal_stop_thermal_controller,
-       .get_thermal_temperature_range =
-               vega20_get_thermal_temperature_range,
-       .register_irq_handlers =
-               smu9_register_irq_handlers,
-       .disable_smc_firmware_ctf =
-               vega20_thermal_disable_alert,
+       .start_thermal_controller = vega20_start_thermal_controller,
+       .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
+       .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+       .register_irq_handlers = smu9_register_irq_handlers,
+       .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
        /* fan control related */
-       .get_fan_speed_percent =
-               vega20_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent =
-               vega20_fan_ctrl_set_fan_speed_percent,
-       .get_fan_speed_info =
-               vega20_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_rpm =
-               vega20_fan_ctrl_get_fan_speed_rpm,
-       .set_fan_speed_rpm =
-               vega20_fan_ctrl_set_fan_speed_rpm,
-       .get_fan_control_mode =
-               vega20_get_fan_control_mode,
-       .set_fan_control_mode =
-               vega20_set_fan_control_mode,
+       .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
+       .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+       .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
+       .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
+       .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
+       .get_fan_control_mode = vega20_get_fan_control_mode,
+       .set_fan_control_mode = vega20_set_fan_control_mode,
        /* smu memory related */
-       .notify_cac_buffer_info =
-               vega20_notify_cac_buffer_info,
-       .enable_mgpu_fan_boost =
-               vega20_enable_mgpu_fan_boost,
+       .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
+       .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
 };
 
 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
index 56fe6a0d42e804f956846faad473071335b3b887..25faaa5c5b10cbc5fcd720d7b15546a826641682 100644 (file)
@@ -328,6 +328,7 @@ struct vega20_registry_data {
        uint8_t   disable_auto_wattman;
        uint32_t  auto_wattman_debug;
        uint32_t  auto_wattman_sample_period;
+       uint32_t  fclk_gfxclk_ratio;
        uint8_t   auto_wattman_threshold;
        uint8_t   log_avfs_param;
        uint8_t   enable_enginess;
index e5a60aa44b5d6be780ca0a19ca5dd6b853f9bcd0..07d180ce4d18eab0ca66bafac8f88fcb0797add1 100644 (file)
@@ -28,7 +28,6 @@
 #include "hardwaremanager.h"
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
-#include "hwmgr_ppt.h"
 #include "power_state.h"
 #include "smu_helper.h"
 
index 65eb630bfea339cc75829aea7fc72d2def865e6c..94bf7b649c20b7ae1c1651c23820b8f5fd46929d 100644 (file)
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
 
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
 
index 45d64a81e94539fe403087cd2827b2992e59edca..4f63a736ea0e7371b6f09b26ea8cc55ec6b9bdd0 100644 (file)
 #define PPSMC_MSG_SetSystemVirtualDramAddrHigh   0x4B
 #define PPSMC_MSG_SetSystemVirtualDramAddrLow    0x4C
 #define PPSMC_MSG_WaflTest                       0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio             0x4E
+// Unused ID 0x4F to 0x50
 #define PPSMC_MSG_AllowGfxOff                    0x51
 #define PPSMC_MSG_DisallowGfxOff                 0x52
 #define PPSMC_MSG_GetPptLimit                    0x53
index 872d3824337bf90ad8abbf4cce1eb9d77630c48a..2b2c266169023d44e52bd92dd09ab07e579fe7ac 100644 (file)
@@ -44,7 +44,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
index d0eb8ab5014812c490efcdb7c0181687e6371bbd..d111dd4e03d7d2442677a6a4cabf0ab5bc924a36 100644 (file)
@@ -29,7 +29,6 @@
 #include "rv_ppsmc.h"
 #include "smu10_driver_if.h"
 #include "smu10.h"
-#include "ppatomctrl.h"
 #include "pp_debug.h"
 
 
index 09b844ec3eabae4f09f8c0d10ed84d53fcc75ee2..e2787e14a50086831f619427b070029e31dba0c6 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
                                        mmSMU_MP1_SRBM2P_ARG_0);
 }
 
-static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+                                           uint16_t msg, uint32_t parameter)
 {
        int result = 0;
+       ktime_t t_start;
+       s64 elapsed_us;
 
        if (hwmgr == NULL || hwmgr->device == NULL)
                return -EINVAL;
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
                /* Read the last message to SMU, to report actual cause */
                uint32_t val = cgs_read_register(hwmgr->device,
                                                 mmSMU_MP1_SRBM2P_MSG_0);
-               pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
-               pr_err("SMU still servicing msg (0x%04x)\n", val);
+               pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
+                       __func__, msg, val);
                return result;
        }
+       t_start = ktime_get();
+
+       cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
 
        cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
        cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
 
-       return 0;
+       result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+                                       SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+
+       elapsed_us = ktime_us_delta(ktime_get(), t_start);
+
+       WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
+                       __func__, msg, parameter, elapsed_us);
+
+       return result;
 }
 
-/* Send a message to the SMC, and wait for its response.*/
 static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-       int result = 0;
-
-       result = smu8_send_msg_to_smc_async(hwmgr, msg);
-       if (result != 0)
-               return result;
-
-       return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
-                                       SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+       return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
 }
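
With the refactor above, every SMU message takes one synchronous path; a parameterless message is simply the parameter == 0 case, and the elapsed-time WARN gives timeouts a concrete duration instead of a bare failure. A hedged usage sketch (the message IDs are placeholders, not necessarily valid smu8 messages):

	/* Parameterized and parameterless sends share one code path. */
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, level);
	smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures);  /* parameter 0 */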
 
 static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                         uint16_t msg, uint32_t parameter)
-{
-       if (hwmgr == NULL || hwmgr->device == NULL)
-               return -EINVAL;
-
-       cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
-       return smu8_send_msg_to_smc(hwmgr, msg);
-}
-
 static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
                                   uint32_t firmware)
 {
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
 
        cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
        hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+       pr_info("smu version %02d.%02d.%02d\n",
+               ((hwmgr->smu_version >> 16) & 0xFF),
+               ((hwmgr->smu_version >> 8) & 0xFF),
+               (hwmgr->smu_version & 0xFF));
        adev->pm.fw_version = hwmgr->smu_version >> 8;
 
        return smu8_request_smu_load_fw(hwmgr);
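
The new banner decodes the packed version word one byte at a time. For a hypothetical smu_version of 0x00251E00: (v >> 16) & 0xFF == 0x25 == 37, (v >> 8) & 0xFF == 0x1E == 30, and v & 0xFF == 0, so the kernel log would read "smu version 37.30.00".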
index 99d5e4f98f49cd7ec103a70eee060b8c9e0241e4..a6edd5df33b0fa0cf9b4b3ed8dd694ba9898b14b 100644 (file)
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
index 9f71512b25109d53afeba1b3c3a3429b05d0228d..1e69300f6175666b585741a25fb5937fdd3a9203 100644 (file)
@@ -40,7 +40,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
index e6c4cd3dc50ec7540d65efd61fea71ddb249ecdd..bfc65040dfcbf702c4420f75105cce38be77254a 100644 (file)
@@ -104,8 +104,6 @@ struct ast_private {
        int fb_mtrr;
 
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
        } ttm;
 
index fe354ebf374d236029a3b671fdafe7d0c3af5aa4..c168d62fe8f9742e280fa287d38fe79a8c98554f 100644 (file)
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct ast_private, ttm.bdev);
 }
 
-static int
-ast_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void
-ast_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int ast_ttm_global_init(struct ast_private *ast)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       global_ref = &ast->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &ast_ttm_mem_global_init;
-       global_ref->release = &ast_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       ast->ttm.bo_global_ref.mem_glob =
-               ast->ttm.mem_global_ref.object;
-       global_ref = &ast->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&ast->ttm.mem_global_ref);
-               return r;
-       }
-       return 0;
-}
-
-static void
-ast_ttm_global_release(struct ast_private *ast)
-{
-       if (ast->ttm.mem_global_ref.release == NULL)
-               return;
-
-       drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&ast->ttm.mem_global_ref);
-       ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct ast_bo *bo;
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
        struct drm_device *dev = ast->dev;
        struct ttm_bo_device *bdev = &ast->ttm.bdev;
 
-       ret = ast_ttm_global_init(ast);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&ast->ttm.bdev,
-                                ast->ttm.bo_global_ref.ref.object,
                                 &ast_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
 
        ttm_bo_device_release(&ast->ttm.bdev);
 
-       ast_ttm_global_release(ast);
-
        arch_phys_wc_del(ast->fb_mtrr);
        arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
                                pci_resource_len(dev->pdev, 0));
index 577a8b917cb912e2884cc13431334187049fda73..fb38c8b857b5a26a7e40d2469d00fe3ce02546ed 100644 (file)
@@ -77,8 +77,6 @@ struct bochs_device {
 
        /* ttm */
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
                bool initialized;
        } ttm;
index e6ccf7fa92d4997bf408dd425fc57ae41cc2a456..0980411e41bf0d7d61fa173b4199253795aa2966 100644 (file)
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct bochs_device, ttm.bdev);
 }
 
-static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int bochs_ttm_global_init(struct bochs_device *bochs)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       global_ref = &bochs->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &bochs_ttm_mem_global_init;
-       global_ref->release = &bochs_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       bochs->ttm.bo_global_ref.mem_glob =
-               bochs->ttm.mem_global_ref.object;
-       global_ref = &bochs->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&bochs->ttm.mem_global_ref);
-               return r;
-       }
-
-       return 0;
-}
-
-static void bochs_ttm_global_release(struct bochs_device *bochs)
-{
-       if (bochs->ttm.mem_global_ref.release == NULL)
-               return;
-
-       drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&bochs->ttm.mem_global_ref);
-       bochs->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct bochs_bo *bo;
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
        struct ttm_bo_device *bdev = &bochs->ttm.bdev;
        int ret;
 
-       ret = bochs_ttm_global_init(bochs);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&bochs->ttm.bdev,
-                                bochs->ttm.bo_global_ref.ref.object,
                                 &bochs_bo_driver,
                                 bochs->dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
                return;
 
        ttm_bo_device_release(&bochs->ttm.bdev);
-       bochs_ttm_global_release(bochs);
        bochs->ttm.initialized = false;
 }
 
index a29f87e98d9d2a224cfb699528e38f4cbdb44c7b..f2b2e0d169fabb73a8c55a6ab459720799a682cf 100644 (file)
@@ -136,8 +136,6 @@ struct cirrus_device {
        int fb_mtrr;
 
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
        } ttm;
        bool mm_inited;
index f2195324379039a651b21c2fec0a1209f35800c0..e075810b4bd4063af7334d0543123c28c44198a9 100644 (file)
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct cirrus_device, ttm.bdev);
 }
 
-static int
-cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void
-cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       global_ref = &cirrus->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &cirrus_ttm_mem_global_init;
-       global_ref->release = &cirrus_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       cirrus->ttm.bo_global_ref.mem_glob =
-               cirrus->ttm.mem_global_ref.object;
-       global_ref = &cirrus->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-               return r;
-       }
-       return 0;
-}
-
-static void
-cirrus_ttm_global_release(struct cirrus_device *cirrus)
-{
-       if (cirrus->ttm.mem_global_ref.release == NULL)
-               return;
-
-       drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-       cirrus->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct cirrus_bo *bo;
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
        struct drm_device *dev = cirrus->dev;
        struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
 
-       ret = cirrus_ttm_global_init(cirrus);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&cirrus->ttm.bdev,
-                                cirrus->ttm.bo_global_ref.ref.object,
                                 &cirrus_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
        ttm_bo_device_release(&cirrus->ttm.bdev);
 
-       cirrus_ttm_global_release(cirrus);
-
        arch_phys_wc_del(cirrus->fb_mtrr);
        cirrus->fb_mtrr = 0;
        arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
index 7e23b150ca80b97b571152fa2e5e73abdde6d3f8..9ac26437051b478b8d9a93da40c93edbaa87a912 100644 (file)
@@ -398,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
 {
        struct drm_crtc_state *crtc_state;
        struct drm_writeback_job *writeback_job = state->writeback_job;
+       const struct drm_display_info *info = &connector->display_info;
+
+       state->max_bpc = info->bpc ? info->bpc : 8;
+       if (connector->max_bpc_property)
+               state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
 
        if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
                return 0;
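
The clamp above works in two steps: the sink's EDID-reported depth (defaulting to 8 bpc when the EDID gives none) sets the ceiling, and the userspace "max bpc" request can only lower it. For example, a panel reporting 12 bpc with userspace requesting 10 yields max_bpc = min(12, 10) = 10; if no property is attached, the EDID value is used unchanged.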
index fe8dd8aa4ae409c55240d89bc9bdf7d94103570d..bc9fc9665614f0904f6184be66903d71f93ff9dc 100644 (file)
@@ -669,6 +669,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                        if (old_connector_state->link_status !=
                            new_connector_state->link_status)
                                new_crtc_state->connectors_changed = true;
+
+                       if (old_connector_state->max_requested_bpc !=
+                           new_connector_state->max_requested_bpc)
+                               new_crtc_state->connectors_changed = true;
                }
 
                if (funcs->atomic_check)
index d5b7f315098c23b2ddc0c0ab74e410396e0ad917..86ac33922b09050ccf9e61779e41cf3f73ab69ae 100644 (file)
@@ -740,6 +740,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
 
                return set_out_fence_for_connector(state->state, connector,
                                                   fence_ptr);
+       } else if (property == connector->max_bpc_property) {
+               state->max_requested_bpc = val;
        } else if (connector->funcs->atomic_set_property) {
                return connector->funcs->atomic_set_property(connector,
                                state, property, val);
@@ -804,6 +806,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
                *val = 0;
        } else if (property == config->writeback_out_fence_ptr_property) {
                *val = 0;
+       } else if (property == connector->max_bpc_property) {
+               *val = state->max_requested_bpc;
        } else if (connector->funcs->atomic_get_property) {
                return connector->funcs->atomic_get_property(connector,
                                state, property, val);
index aa18b1d7d3e4df837de8561ae10322d31ff5159c..fa9baacc863bda92bc070b7f815909ad1d3c6be1 100644 (file)
@@ -932,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  *       is no longer protected and userspace should take appropriate action
  *       (whatever that might be).
  *
+ * max bpc:
+ *     This range property is used by userspace to limit the bit depth. When
+ *     set, the driver limits the bpc in accordance with the valid range
+ *     supported by the hardware and the sink. Drivers should use
+ *     drm_connector_attach_max_bpc_property() to create and attach the
+ *     property to the connector during initialization.
+ *
  * Connectors also have one standardized atomic property:
  *
  * CRTC_ID:
@@ -1599,6 +1606,40 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_connector_set_link_status_property);
 
+/**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+                                         int min, int max)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop;
+
+       prop = connector->max_bpc_property;
+       if (!prop) {
+               prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+               if (!prop)
+                       return -ENOMEM;
+
+               connector->max_bpc_property = prop;
+       }
+
+       drm_object_attach_property(&connector->base, prop, max);
+       connector->state->max_requested_bpc = max;
+       connector->state->max_bpc = max;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
+
 /**
  * drm_connector_init_panel_orientation_property -
  *     initialize the connector's panel_orientation property
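
A minimal sketch of a driver consuming the new helper during connector initialization (the 8..12 range is an arbitrary example, not taken from this patch):

	/* After drm_connector_init(), advertise an 8-12 bpc range. */
	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;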
index 37c01b6076ec42abf14a34128eb4409649d399e7..6d483487f2b487965d5d3ee129c8cd2b603cdeb6 100644 (file)
@@ -1352,3 +1352,93 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_read_desc);
+
+/**
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+                                  bool is_edp)
+{
+       u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+       if (is_edp) {
+               /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+               if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+                       return 4;
+               if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+                       return 2;
+               if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+                       return 1;
+       } else {
+               /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP_2 */
+               u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+               if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+                       return 24;
+               if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+                       return 20;
+               if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+                       return 16;
+               if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+                       return 12;
+               if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+                       return 10;
+               if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+                       return 8;
+               if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+                       return 6;
+               if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+                       return 4;
+               if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+                       return 2;
+               if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+                       return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+       u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+       switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+       case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+               return 9;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+               return 10;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+               return 11;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+               return 12;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+               return 13;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+               return 14;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+               return 15;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+               return 16;
+       case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+               return 8;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+       u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+       if (color_depth & DP_DSC_12_BPC)
+               return 12;
+       if (color_depth & DP_DSC_10_BPC)
+               return 10;
+       if (color_depth & DP_DSC_8_BPC)
+               return 8;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
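
A sketch of how the new DSC helpers might be consumed by a driver; drm_dp_dpcd_read() and DP_DSC_SUPPORT are pre-existing DP helper API, and error handling is trimmed:

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

	/* Cache the DSC receiver capability block, then decode it. */
	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) == sizeof(dsc_dpcd)) {
		u8 max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
		u8 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
		u8 max_bpc = drm_dp_dsc_sink_max_color_depth(dsc_dpcd);
	}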
index 8c3cfac437f431e5dfe2792f61efc45b39e0896d..5294145569625df3e8d7fc05c5ccc5e405ef62b1 100644 (file)
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
        mutex_lock(&mgr->lock);
        mstb = mgr->mst_primary;
 
+       if (!mstb)
+               goto out;
+
        for (i = 0; i < lct - 1; i++) {
                int shift = (i % 2) ? 0 : 4;
                int port_num = (rad[i / 2] >> shift) & 0xf;
index 5f7e99bf4fa446d62f16894578ba11ca99f9a596..12e5e2be7890e39952947f85594675093f4e8eb9 100644 (file)
@@ -960,14 +960,12 @@ static void drm_core_exit(void)
        drm_sysfs_destroy();
        idr_destroy(&drm_minors_idr);
        drm_connector_ida_destroy();
-       drm_global_release();
 }
 
 static int __init drm_core_init(void)
 {
        int ret;
 
-       drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);
 
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
deleted file mode 100644 (file)
index 5799e27..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <drm/drm_global.h>
-
-struct drm_global_item {
-       struct mutex mutex;
-       void *object;
-       int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
-       int i;
-
-       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-               struct drm_global_item *item = &glob[i];
-               mutex_init(&item->mutex);
-               item->object = NULL;
-               item->refcount = 0;
-       }
-}
-
-void drm_global_release(void)
-{
-       int i;
-       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-               struct drm_global_item *item = &glob[i];
-               BUG_ON(item->object != NULL);
-               BUG_ON(item->refcount != 0);
-       }
-}
-
-/**
- * drm_global_item_ref - Initialize and acquire reference to memory
- * object
- * @ref: Object for initialization
- *
- * This initializes a memory object, allocating memory and calling the
- * .init() hook. Further calls will increase the reference count for
- * that item.
- *
- * Returns:
- * Zero on success, non-zero otherwise.
- */
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
-       int ret = 0;
-       struct drm_global_item *item = &glob[ref->global_type];
-
-       mutex_lock(&item->mutex);
-       if (item->refcount == 0) {
-               ref->object = kzalloc(ref->size, GFP_KERNEL);
-               if (unlikely(ref->object == NULL)) {
-                       ret = -ENOMEM;
-                       goto error_unlock;
-               }
-               ret = ref->init(ref);
-               if (unlikely(ret != 0))
-                       goto error_free;
-
-               item->object = ref->object;
-       } else {
-               ref->object = item->object;
-       }
-
-       ++item->refcount;
-       mutex_unlock(&item->mutex);
-       return 0;
-
-error_free:
-       kfree(ref->object);
-       ref->object = NULL;
-error_unlock:
-       mutex_unlock(&item->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-/**
- * drm_global_item_unref - Drop reference to memory
- * object
- * @ref: Object being removed
- *
- * Drop a reference to the memory object and eventually call the
- * release() hook.  The allocated object should be dropped in the
- * release() hook or before calling this function
- *
- */
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
-       struct drm_global_item *item = &glob[ref->global_type];
-
-       mutex_lock(&item->mutex);
-       BUG_ON(item->refcount == 0);
-       BUG_ON(ref->object != item->object);
-       if (--item->refcount == 0) {
-               ref->release(ref);
-               item->object = NULL;
-       }
-       mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
-
index e7c3ed6c9a2e10ddcd7665e851a1bffb9ff0247f..49a6763693f1ab372d4eeea89f72cd04bbf63442 100644 (file)
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
         * If the GPU managed to complete this jobs fence, the timout is
         * spurious. Bail out.
         */
-       if (fence_completed(gpu, submit->out_fence->seqno))
+       if (dma_fence_is_signaled(submit->out_fence))
                return;
 
        /*
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
        change = dma_addr - gpu->hangcheck_dma_addr;
        if (change < 0 || change > 16) {
                gpu->hangcheck_dma_addr = dma_addr;
-               schedule_delayed_work(&sched_job->sched->work_tdr,
-                                     sched_job->sched->timeout);
                return;
        }
 
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 
+       drm_sched_job_cleanup(sched_job);
+
        etnaviv_submit_put(submit);
 }
 
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        if (submit->out_fence_id < 0) {
+               drm_sched_job_cleanup(&submit->sched_job);
                ret = -ENOMEM;
                goto out_unlock;
        }
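
Both etnaviv hunks above enforce the same scheduler rule: once drm_sched_job_init() has succeeded, the job must be released with drm_sched_job_cleanup(), whether it retires normally through free_job() or the submit path fails afterwards. A condensed sketch of the pairing, where my_submit and its helpers are hypothetical driver constructs:

    static int my_push_job(struct my_submit *submit)
    {
        int ret;

        ret = drm_sched_job_init(&submit->sched_job, submit->entity, submit);
        if (ret)
            return ret;

        ret = my_allocate_fence_id(submit);     /* hypothetical post-init step */
        if (ret) {
            /* Init succeeded, so undo its scheduler state explicitly. */
            drm_sched_job_cleanup(&submit->sched_job);
            return ret;
        }

        drm_sched_entity_push_job(&submit->sched_job, submit->entity);
        return 0;
    }

    static void my_free_job(struct drm_sched_job *sched_job)
    {
        struct my_submit *submit = container_of(sched_job,
                                                struct my_submit, sched_job);

        drm_sched_job_cleanup(sched_job);       /* before dropping the last ref */
        my_submit_put(submit);
    }
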
index 94529aa8233922b71cc36011fff305280651be53..aef487dd873153d77fd602726ab6bd92256ab593 100644 (file)
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
        return frm;
 }
 
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
-       struct decon_context *ctx = crtc->ctx;
-
-       return decon_get_frame_count(ctx, false);
-}
-
 static void decon_setup_trigger(struct decon_context *ctx)
 {
        if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .disable                = decon_disable,
        .enable_vblank          = decon_enable_vblank,
        .disable_vblank         = decon_disable_vblank,
-       .get_vblank_counter     = decon_get_vblank_counter,
        .atomic_begin           = decon_atomic_begin,
        .update_plane           = decon_update_plane,
        .disable_plane          = decon_disable_plane,
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
        int ret;
 
        ctx->drm_dev = drm_dev;
-       drm_dev->max_vblank_count = 0xffffffff;
 
        for (win = ctx->first_win; win < WINDOWS_NR; win++) {
                ctx->configs[win].pixel_formats = decon_formats;
index eea90251808fa2e58398fdcb1cac01d160307320..2696289ecc78f204fb504f24c4f897694acb41df 100644 (file)
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
                exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
-       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-       if (exynos_crtc->ops->get_vblank_counter)
-               return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
-       return 0;
-}
-
 static const struct drm_crtc_funcs exynos_crtc_funcs = {
        .set_config     = drm_atomic_helper_set_config,
        .page_flip      = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank = exynos_drm_crtc_enable_vblank,
        .disable_vblank = exynos_drm_crtc_disable_vblank,
-       .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
index ec9604f1272b50d12b44a9f759693dc4a8eaebf6..5e61e707f95555da181969aa71254fc71ffa4098 100644 (file)
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
        void (*disable)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
-       u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
        enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
                const struct drm_display_mode *mode);
        bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
index 07af7758066db47c866a86a2be8fdfe5386421a5..d81e62ae286aea79d39757ecb2233608b905d75f 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 {
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
        struct drm_connector *connector = &dsi->connector;
+       struct drm_device *drm = encoder->dev;
        int ret;
 
        connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-       ret = drm_connector_init(encoder->dev, connector,
-                                &exynos_dsi_connector_funcs,
+       ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
                                 DRM_MODE_CONNECTOR_DSI);
        if (ret) {
                DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
        connector->status = connector_status_disconnected;
        drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
        drm_connector_attach_encoder(connector, encoder);
+       if (!drm->registered)
+               return 0;
 
+       connector->funcs->reset(connector);
+       drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+       drm_connector_register(connector);
        return 0;
 }
 
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
                }
 
                dsi->panel = of_drm_find_panel(device->dev.of_node);
-               if (dsi->panel) {
+               if (IS_ERR(dsi->panel)) {
+                       dsi->panel = NULL;
+               } else {
                        drm_panel_attach(dsi->panel, &dsi->connector);
                        dsi->connector.status = connector_status_connected;
                }
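
Background for the panel hunk above: of_drm_find_panel() used to return NULL on failure but now returns an ERR_PTR()-encoded error (e.g. -ENODEV, or -EPROBE_DEFER while the panel driver has not bound yet), so a plain NULL check would treat an error pointer as a valid panel. The consuming pattern, sketched:

    struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);

    if (IS_ERR(panel))
        panel = NULL;       /* no usable panel yet; continue without one */
    else
        drm_panel_attach(panel, connector);
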
index 918dd2c822098444c6708761baded3ef95420025..01d182289efa38fd75a83cd399b02bbfd1e590c0 100644 (file)
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
        struct drm_fb_helper *helper;
        int ret;
 
-       if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+       if (!dev->mode_config.num_crtc)
                return 0;
 
        fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
index 45c25a488f429f58b126de5ce1a5c2ef6c4aa8d5..3c168ae77b0cdf537222ae48c3c383b15533995a 100644 (file)
@@ -49,8 +49,6 @@ struct hibmc_drm_private {
        bool mode_config_initialized;
 
        /* ttm */
-       struct drm_global_reference mem_global_ref;
-       struct ttm_bo_global_ref bo_global_ref;
        struct ttm_bo_device bdev;
        bool initialized;
 
index 2e3e0bdb8932f8a210b698a1adb8d8a089e91e2b..dd383267884cfe46fd00f5149464dc0efbff59c6 100644 (file)
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct hibmc_drm_private, bdev);
 }
 
-static int
-hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void
-hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
-{
-       int ret;
-
-       hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
-       hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
-       hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
-       hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
-       ret = drm_global_item_ref(&hibmc->mem_global_ref);
-       if (ret) {
-               DRM_ERROR("could not get ref on ttm global: %d\n", ret);
-               return ret;
-       }
-
-       hibmc->bo_global_ref.mem_glob =
-               hibmc->mem_global_ref.object;
-       hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
-       hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
-       hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
-       hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
-       ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
-       if (ret) {
-               DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
-               drm_global_item_unref(&hibmc->mem_global_ref);
-               return ret;
-       }
-       return 0;
-}
-
-static void
-hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
-{
-       drm_global_item_unref(&hibmc->bo_global_ref.ref);
-       drm_global_item_unref(&hibmc->mem_global_ref);
-       hibmc->mem_global_ref.release = NULL;
-}
-
 static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
        struct drm_device *dev = hibmc->dev;
        struct ttm_bo_device *bdev = &hibmc->bdev;
 
-       ret = hibmc_ttm_global_init(hibmc);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&hibmc->bdev,
-                                hibmc->bo_global_ref.ref.object,
                                 &hibmc_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
                                 true);
        if (ret) {
-               hibmc_ttm_global_release(hibmc);
                DRM_ERROR("error initializing bo driver: %d\n", ret);
                return ret;
        }
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             hibmc->fb_size >> PAGE_SHIFT);
        if (ret) {
-               hibmc_ttm_global_release(hibmc);
                DRM_ERROR("failed ttm VRAM init: %d\n", ret);
                return ret;
        }
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
                return;
 
        ttm_bo_device_release(&hibmc->bdev);
-       hibmc_ttm_global_release(hibmc);
        hibmc->mm_inited = false;
 }
 
index 1c2857f13ad438f2433ae0a90a41227bd4e8f683..0ff878c994e2eef3f21638181d9615fb546c4783 100644 (file)
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
          i915_gemfs.o \
          i915_query.o \
          i915_request.o \
+         i915_scheduler.o \
          i915_timeline.o \
          i915_trace_points.o \
          i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
          intel_bios.o \
          intel_cdclk.o \
          intel_color.o \
+         intel_combo_phy.o \
+         intel_connector.o \
          intel_display.o \
          intel_dpio_phy.o \
          intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
          intel_frontbuffer.o \
          intel_hdcp.o \
          intel_hotplug.o \
-         intel_modes.o \
          intel_overlay.o \
          intel_psr.o \
+         intel_quirks.o \
          intel_sideband.o \
          intel_sprite.o
 i915-$(CONFIG_ACPI)            += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
          intel_dp_link_training.o \
          intel_dp_mst.o \
          intel_dp.o \
+         intel_dsi.o \
          intel_dsi_dcs_backlight.o \
          intel_dsi_vbt.o \
          intel_dvo.o \
index 2402395a068da2fc5e83ba988aa76afccfe09f03..58e166effa456426d7fe2ee4f059a0533b4bfe0c 100644 (file)
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
-       mm->ggtt_mm.last_partial_off = -1UL;
 
        return mm;
 }
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
-               mm->ggtt_mm.last_partial_off = -1UL;
        }
 
        vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        struct intel_gvt_gtt_entry e, m;
        dma_addr_t dma_addr;
        int ret;
+       struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+       bool partial_update = false;
 
        if (bytes != 4 && bytes != 8)
                return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        if (!vgpu_gmadr_is_valid(vgpu, gma))
                return 0;
 
-       ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+       e.type = GTT_TYPE_GGTT_PTE;
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
         /* If the GGTT entry size is 8 bytes and it's split into two 4-byte
-        * writes, we assume the two writes are consecutive.
-        * Otherwise, we abort and report an error.
+        * writes, save the first 4 bytes in a list and update the virtual
+        * PTE; only update the shadow PTE once the second half arrives.
         */
        if (bytes < info->gtt_entry_size) {
-               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
-                       /* the first partial part*/
-                       ggtt_mm->ggtt_mm.last_partial_off = off;
-                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-                       return 0;
-               } else if ((g_gtt_index ==
-                               (ggtt_mm->ggtt_mm.last_partial_off >>
-                               info->gtt_entry_size_shift)) &&
-                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
-                       /* the second partial part */
-
-                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
-                               (info->gtt_entry_size - 1);
-
-                       memcpy((void *)&e.val64 + last_off,
-                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
-                               last_off, bytes);
-
-                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
-               } else {
-                       int last_offset;
-
-                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
-                                       ggtt_mm->ggtt_mm.last_partial_off, off,
-                                       bytes, info->gtt_entry_size);
-
-                       /* set host ggtt entry to scratch page and clear
-                        * virtual ggtt entry as not present for last
-                        * partially write offset
-                        */
-                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
-                                       (~(info->gtt_entry_size - 1));
-
-                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
-                       ggtt_invalidate_pte(vgpu, &m);
-                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-                       ops->clear_present(&m);
-                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
-                       ggtt_invalidate(gvt->dev_priv);
-
-                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
-                       ops->clear_present(&e);
-                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
-                       ggtt_mm->ggtt_mm.last_partial_off = off;
-                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+               bool found = false;
+
+               list_for_each_entry_safe(pos, n,
+                               &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+                       if (g_gtt_index == pos->offset >>
+                                       info->gtt_entry_size_shift) {
+                               if (off != pos->offset) {
+                                       /* the second partial part */
+                                       int last_off = pos->offset &
+                                               (info->gtt_entry_size - 1);
+
+                                       memcpy((void *)&e.val64 + last_off,
+                                               (void *)&pos->data + last_off,
+                                               bytes);
+
+                                       list_del(&pos->list);
+                                       kfree(pos);
+                                       found = true;
+                                       break;
+                               }
+
+                               /* update of the first partial part */
+                               pos->data = e.val64;
+                               ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+                               return 0;
+                       }
+               }
 
-                       return 0;
+               if (!found) {
+                       /* the first partial part */
+                       partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+                       if (!partial_pte)
+                               return -ENOMEM;
+                       partial_pte->offset = off;
+                       partial_pte->data = e.val64;
+                       list_add_tail(&partial_pte->list,
+                               &ggtt_mm->ggtt_mm.partial_pte_list);
+                       partial_update = true;
                }
        }
 
-       if (ops->test_present(&e)) {
+       if (!partial_update && (ops->test_present(&e))) {
                gfn = ops->get_pfn(&e);
                m = e;
 
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                } else
                        ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
        } else {
-               ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
-               ggtt_invalidate_pte(vgpu, &m);
                ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                ops->clear_present(&m);
        }
 
 out:
+       ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+       ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+       ggtt_invalidate_pte(vgpu, &e);
+
        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
        ggtt_invalidate(gvt->dev_priv);
-       ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
        return 0;
 }
 
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
        intel_vgpu_reset_ggtt(vgpu, false);
 
+       INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
        return create_scratch_page_tree(vgpu);
 }
 
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 
 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
 {
+       struct intel_gvt_partial_pte *pos;
+
+       list_for_each_entry(pos,
+                       &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+               gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+                       pos->offset, pos->data);
+               kfree(pos);
+       }
        intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
        vgpu->gtt.ggtt_mm = NULL;
 }
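
The list-based bookkeeping above replaces the single last_partial_off slot, so any number of in-flight split writes can be tracked rather than exactly one. A condensed sketch of the flow for one 4-byte write to an 8-byte GGTT entry; struct partial_pte mirrors the intel_gvt_partial_pte added in gtt.h below, everything else is illustrative:

    /* Returns true once both halves of the 8-byte entry have been seen
     * and *pte holds the completed value. */
    static bool track_partial_write(struct list_head *partials,
                                    unsigned long off, u32 half, u64 *pte)
    {
        struct partial_pte *pos, *n;

        list_for_each_entry_safe(pos, n, partials, list) {
            if (pos->offset == off) {
                /* Same half rewritten: refresh the saved data. */
                memcpy((void *)&pos->data + (off & 7), &half, 4);
                return false;
            }
            if ((pos->offset & ~7UL) == (off & ~7UL)) {
                /* The sibling half arrived: merge and retire the entry. */
                *pte = pos->data;
                memcpy((void *)pte + (off & 7), &half, 4);
                list_del(&pos->list);
                kfree(pos);
                return true;
            }
        }

        /* First half: remember it until its sibling shows up. */
        pos = kzalloc(sizeof(*pos), GFP_KERNEL);
        if (pos) {
            pos->offset = off;
            memcpy((void *)&pos->data + (off & 7), &half, 4);
            list_add_tail(&pos->list, partials);
        }
        return false;
    }
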
index 7a9b36176efb7fca7198527512f8873ad21248cb..d8cb04cc946dff3e19466ff387089db96c226d53 100644 (file)
@@ -35,7 +35,6 @@
 #define _GVT_GTT_H_
 
 #define I915_GTT_PAGE_SHIFT         12
-#define I915_GTT_PAGE_MASK             (~(I915_GTT_PAGE_SIZE - 1))
 
 struct intel_vgpu_mm;
 
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
 
 #define GVT_RING_CTX_NR_PDPS   GEN8_3LVL_PDPES
 
+struct intel_gvt_partial_pte {
+       unsigned long offset;
+       u64 data;
+       struct list_head list;
+};
+
 struct intel_vgpu_mm {
        enum intel_gvt_mm_type type;
        struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
-                       unsigned long last_partial_off;
-                       u64 last_partial_data;
+                       struct list_head partial_pte_list;
                } ggtt_mm;
        };
 };
index 90f50f67909a090d72b4cee84077d0b530a4969b..aa280bb071254547fd3d810494bd488d4edbcd44 100644 (file)
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
        vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+       MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
        return 0;
 }
 
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 
-       MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-       MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
        MMIO_D(RC6_CTX_BASE, D_BXT);
 
        MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
index 10e63eea5492916f676011c98ab1751e9d02dac1..36a5147cd01e5224b2c6563c29128d05688e7fba 100644 (file)
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
-       {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+       {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
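
The mask for GEN9_CSFE_CHICKEN1_RCS changes from 0x0 to 0xffff because 0x20d4 is a masked register: the upper 16 bits of every write select which of the lower 16 bits take effect, so restoring its full value requires all mask bits set. i915 expresses the convention with its _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers; a simplified stand-in:

    /* Simplified stand-in for the masked-register write convention:
     * bit N only changes if bit (N + 16) is also set in the written value. */
    #define EXAMPLE_MASKED_BIT_ENABLE(bit)      (((bit) << 16) | (bit))
    #define EXAMPLE_MASKED_BIT_DISABLE(bit)     ((bit) << 16)
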
index ea34003d6dd251e26ac8fad62607d043374e9b71..b8fbe3fabea3062203ad64d5ff33fd4ceebf0da2 100644 (file)
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+                                        struct i915_gem_context *ctx)
+{
+       struct intel_vgpu_mm *mm = workload->shadow_mm;
+       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+       int i = 0;
+
+       if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+               return -1;
+
+       if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+               px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+       } else {
+               for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+                       px_dma(ppgtt->pdp.page_directory[i]) =
+                               mm->ppgtt_mm.shadow_pdps[i];
+               }
+       }
+
+       return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, including the ring buffer, wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->req)
                return 0;
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               return ret;
+       }
+
        /* Pin the shadow context by GVT even though it will also be pinned
         * when i915 allocates the request, because GVT will update the guest
         * context from shadow context when workload is completed, and at that
index 4f3ac0a128893405c61e7ea4769215ce9d2cdc4e..7f455bca528e5007aa509250ffefbfbc732b84ea 100644 (file)
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        if (!IS_GEN5(dev_priv))
                return -ENODEV;
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
+       intel_runtime_pm_put(dev_priv);
+
        return 0;
 }
 
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       u32 act_freq = rps->cur_freq;
        struct drm_file *file;
 
+       if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+                       mutex_lock(&dev_priv->pcu_lock);
+                       act_freq = vlv_punit_read(dev_priv,
+                                                 PUNIT_REG_GPU_FREQ_STS);
+                       act_freq = (act_freq >> 8) & 0xff;
+                       mutex_unlock(&dev_priv->pcu_lock);
+               } else {
+                       act_freq = intel_get_cagf(dev_priv,
+                                                 I915_READ(GEN6_RPSTAT1));
+               }
+               intel_runtime_pm_put(dev_priv);
+       }
+
        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
-       seq_printf(m, "Frequency requested %d\n",
-                  intel_gpu_freq(dev_priv, rps->cur_freq));
+       seq_printf(m, "Frequency requested %d, actual %d\n",
+                  intel_gpu_freq(dev_priv, rps->cur_freq),
+                  intel_gpu_freq(dev_priv, act_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, rps->min_freq),
                   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));
 
-       if (IS_KABYLAKE(dev_priv) ||
-           (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
-               seq_printf(m, "DC3 -> DC5 count: %d\n",
-                          I915_READ(SKL_CSR_DC3_DC5_COUNT));
+       if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+               goto out;
+
+       seq_printf(m, "DC3 -> DC5 count: %d\n",
+                  I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+                                                   SKL_CSR_DC3_DC5_COUNT));
+       if (!IS_GEN9_LP(dev_priv))
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));
-       } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
-               seq_printf(m, "DC3 -> DC5 count: %d\n",
-                          I915_READ(BXT_CSR_DC3_DC5_COUNT));
-       }
 
 out:
        seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
        seq_printf(m, "connector %d: type %s, status: %s\n",
                   connector->base.id, connector->name,
                   drm_get_connector_status_name(connector->status));
-       if (connector->status == connector_status_connected) {
-               seq_printf(m, "\tname: %s\n", connector->display_info.name);
-               seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
-                          connector->display_info.width_mm,
-                          connector->display_info.height_mm);
-               seq_printf(m, "\tsubpixel order: %s\n",
-                          drm_get_subpixel_order_name(connector->display_info.subpixel_order));
-               seq_printf(m, "\tCEA rev: %d\n",
-                          connector->display_info.cea_rev);
-       }
+
+       if (connector->status == connector_status_disconnected)
+               return;
+
+       seq_printf(m, "\tname: %s\n", connector->display_info.name);
+       seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+                  connector->display_info.width_mm,
+                  connector->display_info.height_mm);
+       seq_printf(m, "\tsubpixel order: %s\n",
+                  drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+       seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 
        if (!intel_encoder)
                return;
@@ -4172,6 +4192,7 @@ i915_drop_caches_set(void *data, u64 val)
 
        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);
+       intel_runtime_pm_get(i915);
 
        if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
                i915_gem_set_wedged(i915);
@@ -4181,7 +4202,7 @@ i915_drop_caches_set(void *data, u64 val)
        if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
                ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
                if (ret)
-                       return ret;
+                       goto out;
 
                if (val & DROP_ACTIVE)
                        ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4210,8 @@ i915_drop_caches_set(void *data, u64 val)
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);
 
-               if (ret == 0 && val & DROP_RESET_SEQNO) {
-                       intel_runtime_pm_get(i915);
+               if (ret == 0 && val & DROP_RESET_SEQNO)
                        ret = i915_gem_set_global_seqno(&i915->drm, 1);
-                       intel_runtime_pm_put(i915);
-               }
 
                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);
@@ -4231,6 +4249,9 @@ i915_drop_caches_set(void *data, u64 val)
        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(i915);
 
+out:
+       intel_runtime_pm_put(i915);
+
        return ret;
 }
 
@@ -4331,7 +4352,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
        for (s = 0; s < info->sseu.max_slices; s++) {
                /*
                 * FIXME: Valid SS Mask respects the spec and reads
-                * only valid bits for those registers, excluding reserverd
+                * only valid bits for those registers, excluding reserved
                 * although this seems wrong because it would leave many
                 * subslices without ACK.
                 */
@@ -4641,24 +4662,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
        .write = i915_hpd_storm_ctl_write
 };
 
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+       struct drm_i915_private *dev_priv = m->private;
+
+       seq_printf(m, "Enabled: %s\n",
+                  yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+       return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, i915_hpd_short_storm_ctl_show,
+                          inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+                                             const char __user *ubuf,
+                                             size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_i915_private *dev_priv = m->private;
+       struct i915_hotplug *hotplug = &dev_priv->hotplug;
+       char *newline;
+       char tmp[16];
+       int i;
+       bool new_state;
+
+       if (len >= sizeof(tmp))
+               return -EINVAL;
+
+       if (copy_from_user(tmp, ubuf, len))
+               return -EFAULT;
+
+       tmp[len] = '\0';
+
+       /* Strip newline, if any */
+       newline = strchr(tmp, '\n');
+       if (newline)
+               *newline = '\0';
+
+       /* Reset to the "default" state for this system */
+       if (strcmp(tmp, "reset") == 0)
+               new_state = !HAS_DP_MST(dev_priv);
+       else if (kstrtobool(tmp, &new_state) != 0)
+               return -EINVAL;
+
+       DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+                     new_state ? "En" : "Dis");
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       hotplug->hpd_short_storm_enabled = new_state;
+       /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+       for_each_hpd_pin(i)
+               hotplug->stats[i].count = 0;
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /* Re-enable hpd immediately if we were in an irq storm */
+       flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+       return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_hpd_short_storm_ctl_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = i915_hpd_short_storm_ctl_write,
+};
+
 static int i915_drrs_ctl_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
-       struct intel_crtc *intel_crtc;
-       struct intel_encoder *encoder;
-       struct intel_dp *intel_dp;
+       struct intel_crtc *crtc;
 
        if (INTEL_GEN(dev_priv) < 7)
                return -ENODEV;
 
-       drm_modeset_lock_all(dev);
-       for_each_intel_crtc(dev, intel_crtc) {
-               if (!intel_crtc->base.state->active ||
-                                       !intel_crtc->config->has_drrs)
-                       continue;
+       for_each_intel_crtc(dev, crtc) {
+               struct drm_connector_list_iter conn_iter;
+               struct intel_crtc_state *crtc_state;
+               struct drm_connector *connector;
+               struct drm_crtc_commit *commit;
+               int ret;
+
+               ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+               if (ret)
+                       return ret;
+
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+
+               if (!crtc_state->base.active ||
+                   !crtc_state->has_drrs)
+                       goto out;
 
-               for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+               commit = crtc_state->base.commit;
+               if (commit) {
+                       ret = wait_for_completion_interruptible(&commit->hw_done);
+                       if (ret)
+                               goto out;
+               }
+
+               drm_connector_list_iter_begin(dev, &conn_iter);
+               drm_for_each_connector_iter(connector, &conn_iter) {
+                       struct intel_encoder *encoder;
+                       struct intel_dp *intel_dp;
+
+                       if (!(crtc_state->base.connector_mask &
+                             drm_connector_mask(connector)))
+                               continue;
+
+                       encoder = intel_attached_encoder(connector);
                        if (encoder->type != INTEL_OUTPUT_EDP)
                                continue;
 
@@ -4668,13 +4787,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (val)
                                intel_edp_drrs_enable(intel_dp,
-                                                       intel_crtc->config);
+                                                     crtc_state);
                        else
                                intel_edp_drrs_disable(intel_dp,
-                                                       intel_crtc->config);
+                                                      crtc_state);
                }
+               drm_connector_list_iter_end(&conn_iter);
+
+out:
+               drm_modeset_unlock(&crtc->base.mutex);
+               if (ret)
+                       return ret;
        }
-       drm_modeset_unlock_all(dev);
 
        return 0;
 }
@@ -4818,6 +4942,7 @@ static const struct i915_debugfs_files {
        {"i915_guc_log_level", &i915_guc_log_level_fops},
        {"i915_guc_log_relay", &i915_guc_log_relay_fops},
        {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+       {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
        {"i915_ipc_status", &i915_ipc_status_fops},
        {"i915_drrs_ctl", &i915_drrs_ctl_fops},
        {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
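
i915_hpd_short_storm_ctl_write() above follows the usual shape of a text-input debugfs handler: bound the length, copy from userspace, NUL-terminate, strip the trailing newline, parse, then apply under the appropriate lock. Reduced to a skeleton, with apply_setting() as a hypothetical stand-in for the locked update:

    static ssize_t example_ctl_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
    {
        char tmp[16];
        char *newline;
        bool val;

        if (len >= sizeof(tmp))
            return -EINVAL;
        if (copy_from_user(tmp, ubuf, len))
            return -EFAULT;
        tmp[len] = '\0';

        newline = strchr(tmp, '\n');
        if (newline)
            *newline = '\0';

        if (kstrtobool(tmp, &val))
            return -EINVAL;

        apply_setting(val);     /* hypothetical: take locks, update state */
        return len;
    }
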
@@ -4899,13 +5024,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
                        continue;
 
                err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-               if (err <= 0) {
-                       DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
-                                 size, b->offset, err);
-                       continue;
-               }
-
-               seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+               if (err < 0)
+                       seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+               else
+                       seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
        }
 
        return 0;
@@ -4934,6 +5056,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_panel);
 
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       /* Check whether HDCP is supported by the connector */
+       if (!intel_connector->hdcp.shim)
+               return -EINVAL;
+
+       seq_printf(m, "%s:%d HDCP version: ", connector->name,
+                  connector->base.id);
+       seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+                  "None" : "HDCP1.4");
+       seq_puts(m, "\n");
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -4963,5 +5107,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
                                    connector, &i915_psr_sink_status_fops);
        }
 
+       if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+           connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+           connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+               debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+                                   connector, &i915_hdcp_sink_capability_fops);
+       }
+
        return 0;
 }
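
The new per-connector node uses the DEFINE_SHOW_ATTRIBUTE() pattern: write a single show() callback and the macro generates the file_operations handed to debugfs_create_file(). In miniature, with root standing for the connector's debugfs directory as in i915_debugfs_connector_add() above:

    static int example_show(struct seq_file *m, void *data)
    {
        struct drm_connector *connector = m->private;

        seq_printf(m, "connector: %s\n", connector->name);
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(example);     /* generates example_fops */

    /* debugfs_create_file("example", S_IRUGO, root, connector, &example_fops); */
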
index 44e2c0f5ec502bc1a6c27007c77d56df89019ce3..b1d23c73c147cf3779dc6003095fcc7ca8cdf109 100644 (file)
@@ -345,7 +345,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = HAS_WT(dev_priv);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = USES_PPGTT(dev_priv);
+               value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +645,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (i915_inject_load_failure())
                return -ENODEV;
 
+       if (INTEL_INFO(dev_priv)->num_pipes) {
+               ret = drm_vblank_init(&dev_priv->drm,
+                                     INTEL_INFO(dev_priv)->num_pipes);
+               if (ret)
+                       goto out;
+       }
+
        intel_bios_init(dev_priv);
 
        /* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,7 +694,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_modeset;
 
-       intel_setup_overlay(dev_priv);
+       intel_overlay_setup(dev_priv);
 
        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;
@@ -699,6 +706,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);
 
+       intel_init_ipc(dev_priv);
+
        return 0;
 
 cleanup_gem:
@@ -1030,6 +1039,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 err_uncore:
        intel_uncore_fini(dev_priv);
+       i915_mmio_cleanup(dev_priv);
 err_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 
@@ -1049,17 +1059,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-       /*
-        * i915.enable_ppgtt is read-only, so do an early pass to validate the
-        * user's requested state against the hardware/driver capabilities.  We
-        * do this now so that we can print out any log messages once rather
-        * than every time we check intel_enable_ppgtt().
-        */
-       i915_modparams.enable_ppgtt =
-               intel_sanitize_enable_ppgtt(dev_priv,
-                                           i915_modparams.enable_ppgtt);
-       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
        intel_gvt_sanitize_options(dev_priv);
 }
 
@@ -1175,8 +1174,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       dram_info->valid_dimm = true;
-
        /*
         * If any of the channel is single rank channel, worst case output
         * will be same as if single rank memory, so consider single rank
@@ -1193,8 +1190,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
-               dram_info->is_16gb_dimm = true;
+       dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
 
        dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
                                                                       val_ch1,
@@ -1314,7 +1310,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       dram_info->valid_dimm = true;
        dram_info->valid = true;
        return 0;
 }
@@ -1327,19 +1322,24 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
        int ret;
 
        dram_info->valid = false;
-       dram_info->valid_dimm = false;
-       dram_info->is_16gb_dimm = false;
        dram_info->rank = I915_DRAM_RANK_INVALID;
        dram_info->bandwidth_kbps = 0;
        dram_info->num_channels = 0;
 
+       /*
+        * Assume 16Gb DIMMs are present until proven otherwise.
+        * This is only used for the level 0 watermark latency
+        * w/a which does not apply to bxt/glk.
+        */
+       dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
        if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
                return;
 
        /* Need to calculate bandwidth only for Gen9 */
        if (IS_BROXTON(dev_priv))
                ret = bxt_get_dram_info(dev_priv);
-       else if (INTEL_GEN(dev_priv) == 9)
+       else if (IS_GEN9(dev_priv))
                ret = skl_get_dram_info(dev_priv);
        else
                ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,6 +1374,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
 
+       if (HAS_PPGTT(dev_priv)) {
+               if (intel_vgpu_active(dev_priv) &&
+                   !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+                       i915_report_error(dev_priv,
+                                         "incompatible vGPU found, support for isolated ppGTT required\n");
+                       return -ENXIO;
+               }
+       }
+
        intel_sanitize_options(dev_priv);
 
        i915_perf_init(dev_priv);
@@ -1629,14 +1638,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
        struct drm_i915_private *i915;
+       int err;
 
        i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
        if (!i915)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+       err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+       if (err) {
                kfree(i915);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        i915->drm.pdev = pdev;
@@ -1649,8 +1660,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
        device_info->device_id = pdev->device;
 
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-                    sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+                    BITS_PER_TYPE(device_info->platform_mask));
+       BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
 
        return i915;
 }
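
i915_driver_create() now reports why it failed instead of collapsing every failure to NULL (which the caller could only guess meant -ENOMEM); the hunk that follows converts the caller to IS_ERR()/PTR_ERR() accordingly. This is the standard ERR_PTR() constructor idiom; a generic sketch with hypothetical foo_* names:

    static struct foo *foo_create(void)
    {
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        int err;

        if (!foo)
            return ERR_PTR(-ENOMEM);

        err = foo_init(foo);            /* hypothetical init step */
        if (err) {
            kfree(foo);
            return ERR_PTR(err);        /* propagate the real error */
        }
        return foo;
    }

    /* Caller:
     *      foo = foo_create();
     *      if (IS_ERR(foo))
     *              return PTR_ERR(foo);
     */
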
@@ -1685,8 +1696,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
        int ret;
 
        dev_priv = i915_driver_create(pdev, ent);
-       if (!dev_priv)
-               return -ENOMEM;
+       if (IS_ERR(dev_priv))
+               return PTR_ERR(dev_priv);
 
        /* Disable nuclear pageflip by default on pre-ILK */
        if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1710,26 +1721,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret < 0)
                goto out_cleanup_mmio;
 
-       /*
-        * TODO: move the vblank init and parts of modeset init steps into one
-        * of the i915_driver_init_/i915_driver_register functions according
-        * to the role/effect of the given init step.
-        */
-       if (INTEL_INFO(dev_priv)->num_pipes) {
-               ret = drm_vblank_init(&dev_priv->drm,
-                                     INTEL_INFO(dev_priv)->num_pipes);
-               if (ret)
-                       goto out_cleanup_hw;
-       }
-
        ret = i915_load_modeset_init(&dev_priv->drm);
        if (ret < 0)
                goto out_cleanup_hw;
 
        i915_driver_register(dev_priv);
 
-       intel_init_ipc(dev_priv);
-
        enable_rpm_wakeref_asserts(dev_priv);
 
        i915_welcome_messages(dev_priv);
@@ -1781,7 +1778,6 @@ void i915_driver_unload(struct drm_device *dev)
        i915_reset_error_state(dev_priv);
 
        i915_gem_fini(dev_priv);
-       intel_fbc_cleanup_cfb(dev_priv);
 
        intel_power_domains_fini_hw(dev_priv);
 
@@ -1919,9 +1915,7 @@ static int i915_drm_suspend(struct drm_device *dev)
        i915_save_state(dev_priv);
 
        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-       intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
-       intel_opregion_unregister(dev_priv);
+       intel_opregion_suspend(dev_priv, opregion_target_state);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -1962,7 +1956,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
                                    get_suspend_mode(dev_priv, hibernation));
 
        ret = 0;
-       if (IS_GEN9_LP(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
                bxt_enable_dc9(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_enable_pc8(dev_priv);
@@ -2040,7 +2034,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
        i915_restore_state(dev_priv);
        intel_pps_unlock_regs_wa(dev_priv);
-       intel_opregion_setup(dev_priv);
 
        intel_init_pch_refclk(dev_priv);
 
@@ -2082,12 +2075,10 @@ static int i915_drm_resume(struct drm_device *dev)
         * */
        intel_hpd_init(dev_priv);
 
-       intel_opregion_register(dev_priv);
+       intel_opregion_resume(dev_priv);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
-       intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
        intel_power_domains_enable(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -2155,7 +2146,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_resume_early(dev_priv);
 
-       if (IS_GEN9_LP(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
                gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2922,7 +2913,10 @@ static int intel_runtime_suspend(struct device *kdev)
        intel_uncore_suspend(dev_priv);
 
        ret = 0;
-       if (IS_GEN9_LP(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 11) {
+               icl_display_core_uninit(dev_priv);
+               bxt_enable_dc9(dev_priv);
+       } else if (IS_GEN9_LP(dev_priv)) {
                bxt_display_core_uninit(dev_priv);
                bxt_enable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3007,7 +3001,18 @@ static int intel_runtime_resume(struct device *kdev)
        if (intel_uncore_unclaimed_mmio(dev_priv))
                DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-       if (IS_GEN9_LP(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 11) {
+               bxt_disable_dc9(dev_priv);
+               icl_display_core_init(dev_priv, true);
+               if (dev_priv->csr.dmc_payload) {
+                       if (dev_priv->csr.allowed_dc_mask &
+                           DC_STATE_EN_UPTO_DC6)
+                               skl_enable_dc6(dev_priv);
+                       else if (dev_priv->csr.allowed_dc_mask &
+                                DC_STATE_EN_UPTO_DC5)
+                               gen9_enable_dc5(dev_priv);
+               }
+       } else if (IS_GEN9_LP(dev_priv)) {
                bxt_disable_dc9(dev_priv);
                bxt_display_core_init(dev_priv, true);
                if (dev_priv->csr.dmc_payload &&
index 8624b4bdc242dd7cbd77d527eb3b84fe59a0777f..4064e49dbf70e3807f595c48145b519bd222ae92 100644 (file)
@@ -54,6 +54,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 
+#include "i915_fixed.h"
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -87,8 +88,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20180921"
-#define DRIVER_TIMESTAMP       1537521997
+#define DRIVER_DATE            "20181122"
+#define DRIVER_TIMESTAMP       1542898187
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +128,6 @@ bool i915_error_injected(void);
        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)
 
-typedef struct {
-       uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
-       uint_fixed_16_16_t fp; \
-       fp.val = UINT_MAX; \
-       fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
-       if (val.val == 0)
-               return true;
-       return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
-       uint_fixed_16_16_t fp;
-
-       WARN_ON(val > U16_MAX);
-
-       fp.val = val << 16;
-       return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
-       return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
-       return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
-                                                uint_fixed_16_16_t min2)
-{
-       uint_fixed_16_16_t min;
-
-       min.val = min(min1.val, min2.val);
-       return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
-                                                uint_fixed_16_16_t max2)
-{
-       uint_fixed_16_16_t max;
-
-       max.val = max(max1.val, max2.val);
-       return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
-       uint_fixed_16_16_t fp;
-       WARN_ON(val > U32_MAX);
-       fp.val = (uint32_t) val;
-       return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-                                           uint_fixed_16_16_t d)
-{
-       return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-                                               uint_fixed_16_16_t mul)
-{
-       uint64_t intermediate_val;
-
-       intermediate_val = (uint64_t) val * mul.val;
-       intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-       WARN_ON(intermediate_val > U32_MAX);
-       return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
-                                            uint_fixed_16_16_t mul)
-{
-       uint64_t intermediate_val;
-
-       intermediate_val = (uint64_t) val.val * mul.val;
-       intermediate_val = intermediate_val >> 16;
-       return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
-       uint64_t interm_val;
-
-       interm_val = (uint64_t)val << 16;
-       interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-       return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-                                               uint_fixed_16_16_t d)
-{
-       uint64_t interm_val;
-
-       interm_val = (uint64_t)val << 16;
-       interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-       WARN_ON(interm_val > U32_MAX);
-       return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-                                                    uint_fixed_16_16_t mul)
-{
-       uint64_t intermediate_val;
-
-       intermediate_val = (uint64_t) val * mul.val;
-       return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
-                                            uint_fixed_16_16_t add2)
-{
-       uint64_t interm_sum;
-
-       interm_sum = (uint64_t) add1.val + add2.val;
-       return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-                                                uint32_t add2)
-{
-       uint64_t interm_sum;
-       uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
-       interm_sum = (uint64_t) add1.val + interm_add2.val;
-       return clamp_u64_to_fixed16(interm_sum);
-}
-
 enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
@@ -283,7 +146,8 @@ enum hpd_pin {
 #define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
 
 struct i915_hotplug {
        struct work_struct hotplug_work;
@@ -308,6 +172,8 @@ struct i915_hotplug {
        bool poll_enabled;
 
        unsigned int hpd_storm_threshold;
+       /* Whether or not to count short HPD IRQs in HPD storms */
+       u8 hpd_short_storm_enabled;
 
        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
@@ -465,8 +331,10 @@ struct drm_i915_display_funcs {
 struct intel_csr {
        struct work_struct work;
        const char *fw_path;
+       uint32_t required_version;
+       uint32_t max_fw_size; /* bytes */
        uint32_t *dmc_payload;
-       uint32_t dmc_fw_size;
+       uint32_t dmc_fw_size; /* dwords */
        uint32_t version;
        uint32_t mmio_count;
        i915_reg_t mmioaddr[8];
@@ -546,6 +414,8 @@ struct intel_fbc {
                        int adjusted_y;
 
                        int y;
+
+                       uint16_t pixel_blend_mode;
                } plane;
 
                struct {
@@ -630,7 +500,6 @@ struct i915_psr {
        bool sink_psr2_support;
        bool link_standby;
        bool colorimetry_support;
-       bool alpm;
        bool psr2_enabled;
        u8 sink_sync_latency;
        ktime_t last_entry_attempt;
@@ -918,6 +787,11 @@ struct i915_power_well_desc {
                        /* The pw is backing the VGA functionality */
                        bool has_vga:1;
                        bool has_fuses:1;
+                       /*
+                        * The pw is for an ICL+ TypeC PHY port in
+                        * Thunderbolt mode.
+                        */
+                       bool is_tc_tbt:1;
                } hsw;
        };
        const struct i915_power_well_ops *ops;
@@ -1042,17 +916,6 @@ struct i915_gem_mm {
 
 #define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
 
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B  0x05
-#define DDC_PIN_C  0x04
-#define DDC_PIN_D  0x06
-
 struct ddi_vbt_port_info {
        int max_tmds_clock;
 
@@ -1099,6 +962,7 @@ struct intel_vbt_data {
        unsigned int panel_type:4;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+       enum drm_panel_orientation orientation;
 
        enum drrs_support_type drrs_type;
 
@@ -1144,6 +1008,7 @@ struct intel_vbt_data {
                u8 *data;
                const u8 *sequence[MIPI_SEQ_MAX];
                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+               enum drm_panel_orientation orientation;
        } dsi;
 
        int crt_ddc_pin;
@@ -1240,9 +1105,9 @@ struct skl_ddb_values {
 };
 
 struct skl_wm_level {
-       bool plane_en;
        uint16_t plane_res_b;
        uint8_t plane_res_l;
+       bool plane_en;
 };
 
 /* Stores plane specific WM parameters */
@@ -1519,31 +1384,13 @@ struct i915_oa_ops {
         */
        bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
 
-       /**
-        * @init_oa_buffer: Resets the head and tail pointers of the
-        * circular buffer for periodic OA reports.
-        *
-        * Called when first opening a stream for OA metrics, but also may be
-        * called in response to an OA buffer overflow or other error
-        * condition.
-        *
-        * Note it may be necessary to clear the full OA buffer here as part of
-        * maintaining the invariable that new reports must be written to
-        * zeroed memory for us to be able to reliable detect if an expected
-        * report has not yet landed in memory.  (At least on Haswell the OA
-        * buffer tail pointer is not synchronized with reports being visible
-        * to the CPU)
-        */
-       void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
        /**
         * @enable_metric_set: Selects and applies any MUX configuration to set
         * up the Boolean and Custom (B/C) counters that are part of the
         * counter reports being sampled. May apply system constraints such as
         * disabling EU clock gating as required.
         */
-       int (*enable_metric_set)(struct drm_i915_private *dev_priv,
-                                const struct i915_oa_config *oa_config);
+       int (*enable_metric_set)(struct i915_perf_stream *stream);
 
        /**
         * @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1401,12 @@ struct i915_oa_ops {
        /**
         * @oa_enable: Enable periodic sampling
         */
-       void (*oa_enable)(struct drm_i915_private *dev_priv);
+       void (*oa_enable)(struct i915_perf_stream *stream);
 
        /**
         * @oa_disable: Disable periodic sampling
         */
-       void (*oa_disable)(struct drm_i915_private *dev_priv);
+       void (*oa_disable)(struct i915_perf_stream *stream);
 
        /**
         * @read: Copy data from the circular OA buffer into a given userspace
@@ -1948,7 +1795,6 @@ struct drm_i915_private {
 
        struct dram_info {
                bool valid;
-               bool valid_dimm;
                bool is_16gb_dimm;
                u8 num_channels;
                enum dram_rank {
@@ -2323,6 +2169,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
             (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?           \
             (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
 
+bool i915_sg_trim(struct sg_table *orig_st);
+
 static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
 {
        unsigned int page_sizes;
@@ -2368,20 +2216,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define REVID_FOREVER          0xff
 #define INTEL_REVID(dev_priv)  ((dev_priv)->drm.pdev->revision)
 
-#define GEN_FOREVER (0)
-
 #define INTEL_GEN_MASK(s, e) ( \
        BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
        BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
-       GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
-               (s) != GEN_FOREVER ? (s) - 1 : 0) \
-)
+       GENMASK((e) - 1, (s) - 1))
 
-/*
- * Returns true if Gen is in inclusive range [Start, End].
- *
- * Use GEN_FOREVER for unbound start and or end.
- */
+/* Returns true if Gen is in inclusive range [Start, End] */
 #define IS_GEN(dev_priv, s, e) \
        (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
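
With GEN_FOREVER gone, INTEL_GEN_MASK() is a plain bounded GENMASK and IS_GEN() reduces to one mask test. A userspace sketch of the arithmetic (GENMASK is re-derived here assuming a 64-bit unsigned long; gen_mask stands in for the device-info field):

    #include <stdio.h>

    #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))
    #define INTEL_GEN_MASK(s, e) GENMASK((e) - 1, (s) - 1)

    int main(void)
    {
            unsigned long gen_mask = 1UL << (9 - 1);   /* a Gen9 device */

            /* IS_GEN(dev_priv, s, e) boils down to this mask-and test */
            printf("in [9,10]:  %d\n", !!(gen_mask & INTEL_GEN_MASK(9, 10)));
            printf("in [11,11]: %d\n", !!(gen_mask & INTEL_GEN_MASK(11, 11)));
            return 0;
    }
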
 
@@ -2462,6 +2302,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_KBL_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x590E || \
                                 INTEL_DEVID(dev_priv) == 0x5915 || \
                                 INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
+                                INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
                                 (dev_priv)->info.gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
@@ -2593,9 +2435,14 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
-#define USES_PPGTT(dev_priv)           (i915_modparams.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv)      (i915_modparams.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv)        (i915_modparams.enable_ppgtt == 3)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define HAS_PPGTT(dev_priv) \
+       (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(dev_priv) \
+       (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define HAS_FULL_48BIT_PPGTT(dev_priv) \
+       (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
+
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
        GEM_BUG_ON((sizes) == 0); \
        ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
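
The USES_* module-parameter checks give way to an ordered capability level in the device info, so each HAS_* macro becomes a simple comparison. A sketch of the tiering; the enum names follow the patch (INTEL_PPGTT_ALIASING appears in a later hunk), and the ascending values are an assumption:

    #include <stdio.h>

    enum intel_ppgtt {
            INTEL_PPGTT_NONE = 0,
            INTEL_PPGTT_ALIASING,
            INTEL_PPGTT_FULL,
            INTEL_PPGTT_FULL_4LVL,
    };

    int main(void)
    {
            enum intel_ppgtt level = INTEL_PPGTT_FULL_4LVL;

            /* HAS_FULL_PPGTT() style check: >= covers the 4-level tier too */
            printf("full ppgtt: %d\n", level >= INTEL_PPGTT_FULL);
            return 0;
    }
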
@@ -2743,9 +2590,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
        return IS_BROXTON(dev_priv) && intel_vtd_active();
 }
 
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-                               int enable_ppgtt);
-
 /* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3230,7 +3074,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3462,6 +3306,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
                                     enum port port);
 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
                                enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
 
 /* intel_acpi.c */
 #ifdef CONFIG_ACPI
@@ -3483,8 +3328,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern int intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
                                       bool state);
 extern void intel_display_resume(struct drm_device *dev);
@@ -3584,6 +3427,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
                         const struct intel_crtc_state *old_crtc_state);
 
+/* intel_combo_phy.c */
+void icl_combo_phys_init(struct drm_i915_private *dev_priv);
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644 (file)
index 0000000..591dd89
--- /dev/null
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_FIXED_H_
+#define _I915_FIXED_H_
+
+typedef struct {
+       u32 val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
+
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+       return val.val == 0;
+}
+
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
+{
+       uint_fixed_16_16_t fp = { .val = val << 16 };
+
+       WARN_ON(val > U16_MAX);
+
+       return fp;
+}
+
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+       return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
+{
+       return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
+                                            uint_fixed_16_16_t min2)
+{
+       uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
+
+       return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
+                                            uint_fixed_16_16_t max2)
+{
+       uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
+
+       return max;
+}
+
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
+{
+       uint_fixed_16_16_t fp = { .val = (u32)val };
+
+       WARN_ON(val > U32_MAX);
+
+       return fp;
+}
+
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+                                      uint_fixed_16_16_t d)
+{
+       return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+       u64 tmp;
+
+       tmp = (u64)val * mul.val;
+       tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
+       WARN_ON(tmp > U32_MAX);
+
+       return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+                                            uint_fixed_16_16_t mul)
+{
+       u64 tmp;
+
+       tmp = (u64)val.val * mul.val;
+       tmp = tmp >> 16;
+
+       return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
+{
+       u64 tmp;
+
+       tmp = (u64)val << 16;
+       tmp = DIV_ROUND_UP_ULL(tmp, d);
+
+       return clamp_u64_to_fixed16(tmp);
+}
+
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
+{
+       u64 tmp;
+
+       tmp = (u64)val << 16;
+       tmp = DIV_ROUND_UP_ULL(tmp, d.val);
+       WARN_ON(tmp > U32_MAX);
+
+       return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+       u64 tmp;
+
+       tmp = (u64)val * mul.val;
+
+       return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+                                            uint_fixed_16_16_t add2)
+{
+       u64 tmp;
+
+       tmp = (u64)add1.val + add2.val;
+
+       return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+                                                u32 add2)
+{
+       uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
+       u64 tmp;
+
+       tmp = (u64)add1.val + tmp_add2.val;
+
+       return clamp_u64_to_fixed16(tmp);
+}
+
+#endif /* _I915_FIXED_H_ */
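
The new header centralizes the 16.16 fixed-point helpers that the deleted i915_drv.h blocks earlier in this diff open-coded: one u32 holds 16 integer and 16 fractional bits. A userspace sketch of the representation and the round-up division (stand-in types; DIV_ROUND_UP_ULL is expanded by hand):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t val; } fixed16;  /* mirrors uint_fixed_16_16_t */

    static fixed16 div_fixed16(uint32_t val, uint32_t d)
    {
            /* (val << 16) / d, rounded up, as DIV_ROUND_UP_ULL would do */
            uint64_t tmp = (((uint64_t)val << 16) + d - 1) / d;

            return (fixed16){ .val = (uint32_t)tmp };
    }

    int main(void)
    {
            fixed16 r = div_fixed16(3, 2);       /* 1.5 in 16.16 */

            printf("raw=0x%x int=%u frac=%u/65536\n",
                   r.val, r.val >> 16, r.val & 0xffff);
            return 0;
    }
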
index 0c8aa57ce83b4033723ded1540986b628d92d772..c55b1f75c9803e72fe339020bec14d1718385ab2 100644 (file)
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         */
        err = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_PRIORITY |
                                   (write_domain ? I915_WAIT_ALL : 0),
                                   MAX_SCHEDULE_TIMEOUT,
                                   to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
+/*
+ * Move pages to the appropriate LRU list and release the pagevec,
+ * decrementing the ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+       check_move_unevictable_pages(pvec);
+       __pagevec_release(pvec);
+       cond_resched();
+}
+
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
                              struct sg_table *pages)
 {
        struct sgt_iter sgt_iter;
+       struct pagevec pvec;
        struct page *page;
 
        __i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);
 
+       mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+       pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);
 
-               put_page(page);
+               if (!pagevec_add(&pvec, page))
+                       check_release_pagevec(&pvec);
        }
+       if (pagevec_count(&pvec))
+               check_release_pagevec(&pvec);
        obj->mm.dirty = false;
 
        sg_free_table(pages);
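
check_release_pagevec() above batches what used to be a bare put_page() per page: pages accumulate in a pagevec and each flush fixes up the unevictable LRU state, drops the references, and yields the CPU once per batch. A sketch of the accumulate-and-flush idiom with stand-in types (PVEC_SIZE and release_page() are illustrative, not kernel API):

    #define PVEC_SIZE 15

    struct pvec {
            int nr;
            void *pages[PVEC_SIZE];
    };

    static void release_page(void *page)
    {
            (void)page;   /* stand-in for put_page() */
    }

    /* kernel analogue: check_move_unevictable_pages() followed by
     * __pagevec_release() and cond_resched(), i.e. per-batch work */
    static void flush(struct pvec *p)
    {
            for (int i = 0; i < p->nr; i++)
                    release_page(p->pages[i]);
            p->nr = 0;
    }

    static void add(struct pvec *p, void *page)
    {
            p->pages[p->nr++] = page;
            if (p->nr == PVEC_SIZE)
                    flush(p);           /* flush when full... */
    }
    /* ...and callers flush once more after their loop, as the patch does */
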
@@ -2483,7 +2502,7 @@ unlock:
        mutex_unlock(&obj->mm.lock);
 }
 
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
 {
        struct sg_table new_st;
        struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
        unsigned int sg_page_sizes;
+       struct pagevec pvec;
        gfp_t noreclaim;
        int ret;
 
@@ -2559,6 +2579,7 @@ rebuild_st:
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
+       mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
 
@@ -2573,6 +2594,7 @@ rebuild_st:
                gfp_t gfp = noreclaim;
 
                do {
+                       cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (likely(!IS_ERR(page)))
                                break;
@@ -2583,7 +2605,6 @@ rebuild_st:
                        }
 
                        i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
-                       cond_resched();
 
                        /*
                         * We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
 err_sg:
        sg_mark_end(sg);
 err_pages:
-       for_each_sgt_page(page, sgt_iter, st)
-               put_page(page);
+       mapping_clear_unevictable(mapping);
+       pagevec_init(&pvec);
+       for_each_sgt_page(page, sgt_iter, st) {
+               if (!pagevec_add(&pvec, page))
+                       check_release_pagevec(&pvec);
+       }
+       if (pagevec_count(&pvec))
+               check_release_pagevec(&pvec);
        sg_free_table(st);
        kfree(st);
 
@@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
        struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
        struct drm_i915_private *i915 = s->i915;
 
+       destroy_rcu_head(&s->rcu);
+
        if (same_epoch(i915, s->epoch)) {
                INIT_WORK(&s->work, __sleep_work);
                queue_work(i915->wq, &s->work);
@@ -3646,6 +3675,7 @@ out_rearm:
        if (same_epoch(dev_priv, epoch)) {
                struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
                if (s) {
+                       init_rcu_head(&s->rcu);
                        s->i915 = dev_priv;
                        s->epoch = epoch;
                        call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        start = ktime_get();
 
        ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+                                  I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_PRIORITY |
+                                  I915_WAIT_ALL,
                                   to_wait_timeout(args->timeout_ns),
                                   to_rps_client(file));
 
@@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 
+       init_rcu_head(&obj->rcu);
+
        obj->ops = ops;
 
        reservation_object_init(&obj->__builtin_resv);
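
The init_rcu_head() added here pairs with the destroy_rcu_head() in __i915_gem_free_object_rcu() just below (and likewise for the sleep_rcu_work hunks above): under CONFIG_DEBUG_OBJECTS_RCU_HEAD, a head whose storage is reused after the callback fires must be explicitly retired first. A kernel-style sketch of the pairing (assumes <linux/rcupdate.h>; the struct is illustrative):

    #include <linux/rcupdate.h>
    #include <linux/kernel.h>

    struct obj {
            struct rcu_head rcu;    /* storage later reused as a list link */
    };

    static void obj_free_cb(struct rcu_head *head)
    {
            struct obj *o = container_of(head, struct obj, rcu);

            destroy_rcu_head(&o->rcu);  /* done being an rcu_head... */
            /* ...so its memory may now safely be reused for other state */
    }

    static void obj_init(struct obj *o)
    {
            init_rcu_head(&o->rcu);     /* pairs with destroy_rcu_head() */
    }

    static void obj_release(struct obj *o)
    {
            call_rcu(&o->rcu, obj_free_cb);
    }
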
@@ -4976,6 +5010,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+       /*
+        * We reuse obj->rcu for the freed list, so we had better not treat
+        * it like an rcu_head from this point forward. And we expect all
+        * objects to be freed via this path.
+        */
+       destroy_rcu_head(&obj->rcu);
+
        /*
         * Since we require blocking on struct_mutex to unbind the freed
         * object from the GPU before releasing resources back to the
@@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-       if (HAS_PCH_NOP(dev_priv)) {
-               if (IS_IVYBRIDGE(dev_priv)) {
-                       u32 temp = I915_READ(GEN7_MSG_CTL);
-                       temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-                       I915_WRITE(GEN7_MSG_CTL, temp);
-               } else if (INTEL_GEN(dev_priv) >= 7) {
-                       u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
-                       temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
-                       I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
-               }
-       }
-
        intel_gt_workarounds_apply(dev_priv);
 
        i915_gem_init_swizzling(dev_priv);
@@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
         * the bits.
         */
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
-                    sizeof(atomic_t) * BITS_PER_BYTE);
+                    BITS_PER_TYPE(atomic_t));
 
        if (old) {
                WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
index 599c4f6eb1eab017f20ef7e06c4ce43aa817c42f..b0e4b976880c0bf6699197f4f0215e15289c23f4 100644 (file)
@@ -47,17 +47,19 @@ struct drm_i915_private;
 #define GEM_DEBUG_DECL(var) var
 #define GEM_DEBUG_EXEC(expr) expr
 #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
 
 #else
 
 #define GEM_SHOW_DEBUG() (0)
 
 #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
-#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
 
 #define GEM_DEBUG_DECL(var)
 #define GEM_DEBUG_EXEC(expr) do { } while (0)
 #define GEM_DEBUG_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
 #endif
 
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
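
The non-debug GEM_WARN_ON above changes meaning: it used to collapse to a constant 0 and discard the expression, so error-path branching silently vanished in release builds; now it evaluates the condition and returns its truth value, with the new GEM_DEBUG_WARN_ON taking over the old compile-away role. A small sketch of why that matters for callers:

    #include <stdio.h>

    /* release-build variant after the patch: condition still evaluated */
    #define GEM_WARN_ON(expr) (!!(expr))

    static int bind_vma(void *vma)
    {
            /* usable directly as a branch: the error path runs in all builds */
            if (GEM_WARN_ON(vma == NULL))
                    return -22;         /* -EINVAL */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", bind_vma(NULL));   /* prints -22 */
            return 0;
    }
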
index f772593b99ab484563de0e1cad543e95254e0553..b97963db0287ab51a2cdf5e599bb5b01ac13c35f 100644 (file)
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->contexts.list);
        ctx->i915 = dev_priv;
-       ctx->sched.priority = I915_PRIORITY_NORMAL;
+       ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 
        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
        if (IS_ERR(ctx))
                return ctx;
 
-       if (USES_FULL_PPGTT(dev_priv)) {
+       if (HAS_FULL_PPGTT(dev_priv)) {
                struct i915_hw_ppgtt *ppgtt;
 
                ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
        if (ret)
                return ERR_PTR(ret);
 
-       ctx = __create_hw_context(to_i915(dev), NULL);
+       ctx = i915_gem_create_context(to_i915(dev), NULL);
        if (IS_ERR(ctx))
                goto out;
 
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
        }
 
        i915_gem_context_clear_bannable(ctx);
-       ctx->sched.priority = prio;
+       ctx->sched.priority = I915_USER_PRIORITY(prio);
        ctx->ring_size = PAGE_SIZE;
 
        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                args->value = i915_gem_context_is_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
-               args->value = ctx->sched.priority;
+               args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
                break;
        default:
                ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                 !capable(CAP_SYS_NICE))
                                ret = -EPERM;
                        else
-                               ctx->sched.priority = priority;
+                               ctx->sched.priority =
+                                       I915_USER_PRIORITY(priority);
                }
                break;
 
index 08165f6a0a842482cd1eec32f5bd131e39e44500..f6d870b1f73e397971e4f55714d8524f68220395 100644 (file)
@@ -163,6 +163,7 @@ struct i915_gem_context {
        /** engine: per-engine logical HW state */
        struct intel_context {
                struct i915_gem_context *gem_context;
+               struct intel_engine_cs *active;
                struct i915_vma *state;
                struct intel_ring *ring;
                u32 *lrc_reg_state;
index 09187286d34627df882e4ede753db7e40da41934..d4fac09095f862aed3131243957059de2df4f6b0 100644 (file)
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
         * any non-page-aligned or non-canonical addresses.
         */
        if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
-                    entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+                    entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
                return -EINVAL;
 
        /* pad_to_size was once a reserved field, so sanitize it */
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
                else if (gen >= 4)
                        len = 4;
                else
-                       len = 3;
+                       len = 6;
 
                batch = reloc_gpu(eb, vma, len);
                if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
                        *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *batch++ = addr;
                        *batch++ = target_offset;
+
+                       /* And again for good measure (blb/pnv) */
+                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+                       *batch++ = addr;
+                       *batch++ = target_offset;
                }
 
                goto out;
index 56c7f86373112b96212fc7952a5968854f9bdbb1..add1fe7aeb930f2e21e73d12dbf96f095decc5be 100644 (file)
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
        i915->ggtt.invalidate(i915);
 }
 
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-                               int enable_ppgtt)
-{
-       bool has_full_ppgtt;
-       bool has_full_48bit_ppgtt;
-
-       if (!dev_priv->info.has_aliasing_ppgtt)
-               return 0;
-
-       has_full_ppgtt = dev_priv->info.has_full_ppgtt;
-       has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
-
-       if (intel_vgpu_active(dev_priv)) {
-               /* GVT-g has no support for 32bit ppgtt */
-               has_full_ppgtt = false;
-               has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
-       }
-
-       /*
-        * We don't allow disabling PPGTT for gen9+ as it's a requirement for
-        * execlists, the sole mechanism available to submit work.
-        */
-       if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
-               return 0;
-
-       if (enable_ppgtt == 1)
-               return 1;
-
-       if (enable_ppgtt == 2 && has_full_ppgtt)
-               return 2;
-
-       if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
-               return 3;
-
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (IS_GEN6(dev_priv) && intel_vtd_active()) {
-               DRM_INFO("Disabling PPGTT because VT-d is on\n");
-               return 0;
-       }
-
-       if (has_full_48bit_ppgtt)
-               return 3;
-
-       if (has_full_ppgtt)
-               return 2;
-
-       return 1;
-}
-
 static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
        memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 }
 
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-                                 enum i915_cache_level level,
-                                 u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+                          enum i915_cache_level level,
+                          u32 flags)
 {
        gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 #define gen8_pdpe_encode gen8_pde_encode
 #define gen8_pml4e_encode gen8_pde_encode
 
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
-                                enum i915_cache_level level,
-                                u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+                         enum i915_cache_level level,
+                         u32 flags)
 {
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
-                                enum i915_cache_level level,
-                                u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+                         enum i915_cache_level level,
+                         u32 flags)
 {
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
-                                enum i915_cache_level level,
-                                u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+                         enum i915_cache_level level,
+                         u32 flags)
 {
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
-                                enum i915_cache_level level,
-                                u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+                         enum i915_cache_level level,
+                         u32 flags)
 {
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
-                                 enum i915_cache_level level,
-                                 u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+                          enum i915_cache_level level,
+                          u32 flags)
 {
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
         * region, including any PTEs which happen to point to scratch.
         *
         * This is only relevant for the 48b PPGTT where we support
-        * huge-gtt-pages, see also i915_vma_insert().
-        *
-        * TODO: we should really consider write-protecting the scratch-page and
-        * sharing between ppgtt
+        * huge-gtt-pages, see also i915_vma_insert(). However, as the scratch
+        * page is shared read-only among all VMs, we create a single 64K
+        * scratch page for all of them.
         */
        size = I915_GTT_PAGE_SIZE_4K;
        if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
 {
-       fill_px(vm, pt,
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+       fill_px(vm, pt, vm->scratch_pte);
 }
 
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
 {
-       fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+       fill32_px(vm, pt, vm->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 /* Removes entries from a single page table, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries.
  */
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
                                struct i915_page_table *pt,
                                u64 start, u64 length)
 {
        unsigned int num_entries = gen8_pte_count(start, length);
        unsigned int pte = gen8_pte_index(start);
        unsigned int pte_end = pte + num_entries;
-       const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t *vaddr;
 
        GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 
        vaddr = kmap_atomic_px(pt);
        while (pte < pte_end)
-               vaddr[pte++] = scratch_pte;
+               vaddr[pte++] = vm->scratch_pte;
        kunmap_atomic(vaddr);
 
        return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
                        if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
                                u16 i;
 
-                               encode = pte_encode | vma->vm->scratch_page.daddr;
+                               encode = vma->vm->scratch_pte;
                                vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
 
                                for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 {
        int ret;
 
+       /*
+        * If everybody agrees not to write into the scratch page,
+        * we can reuse it for all vm, keeping contexts and processes separate.
+        */
+       if (vm->has_read_only &&
+           vm->i915->kernel_context &&
+           vm->i915->kernel_context->ppgtt) {
+               struct i915_address_space *clone =
+                       &vm->i915->kernel_context->ppgtt->vm;
+
+               GEM_BUG_ON(!clone->has_read_only);
+
+               vm->scratch_page.order = clone->scratch_page.order;
+               vm->scratch_pte = clone->scratch_pte;
+               vm->scratch_pt  = clone->scratch_pt;
+               vm->scratch_pd  = clone->scratch_pd;
+               vm->scratch_pdp = clone->scratch_pdp;
+               return 0;
+       }
+
        ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;
 
+       vm->scratch_pte =
+               gen8_pte_encode(vm->scratch_page.daddr,
+                               I915_CACHE_LLC,
+                               PTE_READ_ONLY);
+
        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
                ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 
 static void gen8_free_scratch(struct i915_address_space *vm)
 {
+       if (!vm->scratch_page.daddr)
+               return;
+
        if (use_4lvl(vm))
                free_pdp(vm, vm->scratch_pdp);
        free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
        struct i915_address_space *vm = &ppgtt->vm;
-       const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen8_pte_t scratch_pte = vm->scratch_pte;
        u64 start = 0, length = ppgtt->vm.total;
 
        if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
        ppgtt->vm.i915 = i915;
        ppgtt->vm.dma = &i915->drm.pdev->dev;
 
-       ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
+       ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
                1ULL << 48 :
                1ULL << 32;
 
-       /*
-        * From bdw, there is support for read-only pages in the PPGTT.
-        *
-        * XXX GVT is not honouring the lack of RW in the PTE bits.
-        */
-       ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+       /* From bdw, there is support for read-only pages in the PPGTT. */
+       ppgtt->vm.has_read_only = true;
 
        i915_address_space_init(&ppgtt->vm, i915);
 
@@ -1721,7 +1691,7 @@ err_free:
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+       const gen6_pte_t scratch_pte = base->vm.scratch_pte;
        struct i915_page_table *pt;
        u32 pte, pde;
 
@@ -1757,7 +1727,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
                        if (i == 4)
                                continue;
 
-                       seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+                       seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
                                   pde, pte,
                                   (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
                        for (i = 0; i < 4; i++) {
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
                  ppgtt->pd_addr + pde);
 }
 
-static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       for_each_engine(engine, dev_priv, id) {
-               u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
-                                GEN8_GFX_PPGTT_48B : 0;
-               I915_WRITE(RING_MODE_GEN7(engine),
-                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
-       }
-}
-
 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
        ecochk = I915_READ(GAM_ECOCHK);
        I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
-       I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+       if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+               I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 }
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned int pde = first_entry / GEN6_PTES;
        unsigned int pte = first_entry % GEN6_PTES;
        unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+       const gen6_pte_t scratch_pte = vm->scratch_pte;
 
        while (num_entries) {
                struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                        if (IS_ERR(pt))
                                goto unwind_out;
 
-                       gen6_initialize_pt(ppgtt, pt);
+                       gen6_initialize_pt(vm, pt);
                        ppgtt->base.pd.page_table[pde] = pt;
 
                        if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
        if (ret)
                return ret;
 
-       ppgtt->scratch_pte =
-               vm->pte_encode(vm->scratch_page.daddr,
-                              I915_CACHE_NONE, PTE_READ_ONLY);
+       vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+                                        I915_CACHE_NONE,
+                                        PTE_READ_ONLY);
 
        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
                return PTR_ERR(vm->scratch_pt);
        }
 
-       gen6_initialize_pt(ppgtt, vm->scratch_pt);
+       gen6_initialize_pt(vm, vm->scratch_pt);
        gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
                ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 {
        gtt_write_workarounds(dev_priv);
 
-       /* In the case of execlists, PPGTT is enabled by the context descriptor
-        * and the PDPs are contained within the context itself.  We don't
-        * need to do anything here. */
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
-               return 0;
-
-       if (!USES_PPGTT(dev_priv))
-               return 0;
-
        if (IS_GEN6(dev_priv))
                gen6_ppgtt_enable(dev_priv);
        else if (IS_GEN7(dev_priv))
                gen7_ppgtt_enable(dev_priv);
-       else if (INTEL_GEN(dev_priv) >= 8)
-               gen8_ppgtt_enable(dev_priv);
-       else
-               MISSING_CASE(INTEL_GEN(dev_priv));
 
        return 0;
 }
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        unsigned first_entry = start / I915_GTT_PAGE_SIZE;
        unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-       const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen8_pte_t scratch_pte = vm->scratch_pte;
        gen8_pte_t __iomem *gtt_base =
                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-                                    I915_CACHE_LLC, 0);
+       scratch_pte = vm->scratch_pte;
 
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
        /* And finally clear the reserved guard page */
        ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
-       if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
+       if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
                ret = i915_gem_init_aliasing_ppgtt(dev_priv);
                if (ret)
                        goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
                return ret;
        }
 
+       ggtt->vm.scratch_pte =
+               ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+                                   I915_CACHE_NONE, 0);
+
        return 0;
 }
 
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
        ppat->match = bdw_private_pat_match;
        ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
 
-       if (!USES_PPGTT(ppat->i915)) {
+       if (!HAS_PPGTT(ppat->i915)) {
                /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
                 * so RTL will always use the value corresponding to
                 * pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->vm.cleanup = gen6_gmch_remove;
        ggtt->vm.insert_page = gen8_ggtt_insert_page;
        ggtt->vm.clear_range = nop_clear_range;
-       if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+       if (intel_scanout_needs_vtd_wa(dev_priv))
                ggtt->vm.clear_range = gen8_ggtt_clear_range;
 
        ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
                if (ggtt->vm.clear_range != nop_clear_range)
                        ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+
+               /* Prevent recursive stop_machine() calls and the deadlocks they cause. */
+               dev_info(dev_priv->drm.dev,
+                        "Disabling error capture for VT-d workaround\n");
+               i915_disable_error_state(dev_priv, -ENODEV);
        }
 
        ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
        ggtt->vm.vma_ops.clear_pages = clear_pages;
 
+       ggtt->vm.pte_encode = gen8_pte_encode;
+
        setup_private_pat(dev_priv);
 
        return ggtt_probe_common(ggtt, size);
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        /* Only VLV supports read-only GGTT mappings */
        ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
 
-       if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
+       if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
                ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 }
 
 static struct scatterlist *
-rotate_pages(const dma_addr_t *in, unsigned int offset,
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
             unsigned int width, unsigned int height,
             unsigned int stride,
             struct sg_table *st, struct scatterlist *sg)
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
        unsigned int src_idx;
 
        for (column = 0; column < width; column++) {
-               src_idx = stride * (height - 1) + column;
+               src_idx = stride * (height - 1) + column + offset;
                for (row = 0; row < height; row++) {
                        st->nents++;
                        /* We don't need the pages, but need to initialize
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
                         * The only thing we need are DMA addresses.
                         */
                        sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
-                       sg_dma_address(sg) = in[offset + src_idx];
+                       sg_dma_address(sg) =
+                               i915_gem_object_get_dma_address(obj, src_idx);
                        sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
                        sg = sg_next(sg);
                        src_idx -= stride;
@@ -3742,22 +3697,11 @@ static noinline struct sg_table *
 intel_rotate_pages(struct intel_rotation_info *rot_info,
                   struct drm_i915_gem_object *obj)
 {
-       const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
        unsigned int size = intel_rotation_info_size(rot_info);
-       struct sgt_iter sgt_iter;
-       dma_addr_t dma_addr;
-       unsigned long i;
-       dma_addr_t *page_addr_list;
        struct sg_table *st;
        struct scatterlist *sg;
        int ret = -ENOMEM;
-
-       /* Allocate a temporary list of source pages for random access. */
-       page_addr_list = kvmalloc_array(n_pages,
-                                       sizeof(dma_addr_t),
-                                       GFP_KERNEL);
-       if (!page_addr_list)
-               return ERR_PTR(ret);
+       int i;
 
        /* Allocate target SG list. */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
        if (ret)
                goto err_sg_alloc;
 
-       /* Populate source page list from the object. */
-       i = 0;
-       for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
-               page_addr_list[i++] = dma_addr;
-
-       GEM_BUG_ON(i != n_pages);
        st->nents = 0;
        sg = st->sgl;
 
        for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
-               sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+               sg = rotate_pages(obj, rot_info->plane[i].offset,
                                  rot_info->plane[i].width, rot_info->plane[i].height,
                                  rot_info->plane[i].stride, st, sg);
        }
 
-       kvfree(page_addr_list);
-
        return st;
 
 err_sg_alloc:
        kfree(st);
 err_st_alloc:
-       kvfree(page_addr_list);
 
        DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
                         obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
                count -= len >> PAGE_SHIFT;
                if (count == 0) {
                        sg_mark_end(sg);
+                       i915_sg_trim(st); /* Drop any unused tail entries. */
+
                        return st;
                }
 
index 7e2af5f4f39bcbb5ec355257d41decea7b45d019..4874da09a3c471d24697b55b4ab7687d2d76afbf 100644 (file)
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K  BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M  BIT_ULL(21)
 
 #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
 #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
 
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
 #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
 
 #define I915_FENCE_REG_NONE -1
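
The new I915_GTT_PAGE_MASK relies on two's-complement negation of a power-of-two size producing its alignment mask, and with the sizes now BIT_ULL the mask is 64 bits wide as well. A quick demonstration of the trick:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t page = 1ULL << 12;          /* BIT_ULL(12) == 4 KiB */
            uint64_t mask = -page;               /* 0xfffffffffffff000 */
            uint64_t addr = 0x12345;

            printf("mask  = 0x%llx\n", (unsigned long long)mask);
            printf("align = 0x%llx\n", (unsigned long long)(addr & mask));
            return 0;
    }
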
@@ -287,6 +289,7 @@ struct i915_address_space {
 
        struct mutex mutex; /* protects vma and our lists */
 
+       u64 scratch_pte;
        struct i915_page_dma scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
@@ -333,12 +336,11 @@ struct i915_address_space {
        /* Some systems support read-only mappings for GGTT and/or PPGTT */
        bool has_read_only:1;
 
-       /* FIXME: Need a more generic return type */
-       gen6_pte_t (*pte_encode)(dma_addr_t addr,
-                                enum i915_cache_level level,
-                                u32 flags); /* Create a valid PTE */
-       /* flags for pte_encode */
+       u64 (*pte_encode)(dma_addr_t addr,
+                         enum i915_cache_level level,
+                         u32 flags); /* Create a valid PTE */
 #define PTE_READ_ONLY  (1<<0)
+
        int (*allocate_va_range)(struct i915_address_space *vm,
                                 u64 start, u64 length);
        void (*clear_range)(struct i915_address_space *vm,
@@ -420,7 +422,6 @@ struct gen6_hw_ppgtt {
 
        struct i915_vma *vma;
        gen6_pte_t __iomem *pd_addr;
-       gen6_pte_t scratch_pte;
 
        unsigned int pin_count;
        bool scan_for_unused_pt;
@@ -659,20 +660,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
                        u64 start, u64 end, unsigned int flags);
 
 /* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK           BIT(0)
-#define PIN_MAPPABLE           BIT(1)
-#define PIN_ZONE_4G            BIT(2)
-#define PIN_NONFAULT           BIT(3)
-#define PIN_NOEVICT            BIT(4)
-
-#define PIN_MBZ                        BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL             BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER               BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE             BIT(8)
-
-#define PIN_HIGH               BIT(9)
-#define PIN_OFFSET_BIAS                BIT(10)
-#define PIN_OFFSET_FIXED       BIT(11)
+#define PIN_NONBLOCK           BIT_ULL(0)
+#define PIN_MAPPABLE           BIT_ULL(1)
+#define PIN_ZONE_4G            BIT_ULL(2)
+#define PIN_NONFAULT           BIT_ULL(3)
+#define PIN_NOEVICT            BIT_ULL(4)
+
+#define PIN_MBZ                        BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL             BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER               BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE             BIT_ULL(8)
+
+#define PIN_HIGH               BIT_ULL(9)
+#define PIN_OFFSET_BIAS                BIT_ULL(10)
+#define PIN_OFFSET_FIXED       BIT_ULL(11)
 #define PIN_OFFSET_MASK                (-I915_GTT_PAGE_SIZE)
 
 #endif
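
The PIN_* switch to BIT_ULL follows from the page-size constants above: BIT() yields an unsigned long, which is 32 bits on 32-bit builds, while these flags travel through 64-bit parameters and combine with PIN_OFFSET_MASK, now a 64-bit value. A userspace illustration of the mixed-width failure mode the all-u64 constants avoid (the bit positions are exaggerated to make it visible):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    int main(void)
    {
            /* ANDing a 64-bit offset with a 32-bit-wide mask silently
             * drops the high bits; the ULL form does not */
            uint64_t offset = BIT_ULL(40);
            uint32_t mask32 = (uint32_t)-4096;   /* 32-bit alignment mask */
            uint64_t mask64 = -(uint64_t)4096;

            printf("32-bit mask keeps bit 40: %d\n", !!(offset & mask32));
            printf("64-bit mask keeps bit 40: %d\n", !!(offset & mask64));
            return 0;
    }
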
index 8762d17b66591e2afc8fc9647bef2784145f2c19..8123bf0e4807d42c4cd4a40152b922c4fa012498 100644 (file)
@@ -27,7 +27,7 @@
  *
  */
 
-#include <generated/utsrelease.h>
+#include <linux/utsname.h>
 #include <linux/stop_machine.h>
 #include <linux/zlib.h>
 #include <drm/drm_print.h>
@@ -512,7 +512,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
                        err_printf(m, "  SYNC_2: 0x%08x\n",
                                   ee->semaphore_mboxes[2]);
        }
-       if (USES_PPGTT(m->i915)) {
+       if (HAS_PPGTT(m->i915)) {
                err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
 
                if (INTEL_GEN(m->i915) >= 8) {
@@ -648,9 +648,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                return 0;
        }
 
+       if (IS_ERR(error))
+               return PTR_ERR(error);
+
        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
-       err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       err_printf(m, "Kernel: %s\n", init_utsname()->release);
        ts = ktime_to_timespec64(error->time);
        err_printf(m, "Time: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -999,7 +1002,6 @@ i915_error_object_create(struct drm_i915_private *i915,
        }
 
        compress_fini(&compress, dst);
-       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
 }
 
@@ -1268,7 +1270,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
        ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
                                                  engine);
 
-       if (USES_PPGTT(dev_priv)) {
+       if (HAS_PPGTT(dev_priv)) {
                int i;
 
                ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1785,6 +1787,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
        return epoch;
 }
 
+static void capture_finish(struct i915_gpu_state *error)
+{
+       struct i915_ggtt *ggtt = &error->i915->ggtt;
+       const u64 slot = ggtt->error_capture.start;
+
+       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
 static int capture(void *data)
 {
        struct i915_gpu_state *error = data;
@@ -1809,6 +1819,7 @@ static int capture(void *data)
 
        error->epoch = capture_find_epoch(error);
 
+       capture_finish(error);
        return 0;
 }
 
@@ -1859,6 +1870,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
        error = i915_capture_gpu_state(i915);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               i915_disable_error_state(i915, -ENOMEM);
                return;
        }
 
@@ -1914,5 +1926,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
        i915->gpu_error.first_error = NULL;
        spin_unlock_irq(&i915->gpu_error.lock);
 
-       i915_gpu_state_put(error);
+       if (!IS_ERR(error))
+               i915_gpu_state_put(error);
+}
+
+void i915_disable_error_state(struct drm_i915_private *i915, int err)
+{
+       spin_lock_irq(&i915->gpu_error.lock);
+       if (!i915->gpu_error.first_error)
+               i915->gpu_error.first_error = ERR_PTR(err);
+       spin_unlock_irq(&i915->gpu_error.lock);
 }
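
After this change gpu_error.first_error is a tri-state: NULL (nothing captured), a real capture, or an ERR_PTR() recording why capture is off, which is why i915_reset_error_state() now checks IS_ERR() before dropping a reference. A userspace sketch of the ERR_PTR idiom (the helpers are re-derived here to match the kernel's pointer-encodes-errno convention):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *first_error;   /* NULL, a capture, or ERR_PTR(-E...) */

    int main(void)
    {
            first_error = ERR_PTR(-19);   /* -ENODEV: capture disabled */

            if (IS_ERR(first_error))
                    printf("capture disabled: %ld\n", PTR_ERR(first_error));
            return 0;
    }
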
index 8710fb18ed746cface7e9a7b2d6d6ac7cd06b2b4..3ec89a504de52331ade6a9452a844527d84ec515 100644 (file)
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
 
 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
 void i915_reset_error_state(struct drm_i915_private *i915);
+void i915_disable_error_state(struct drm_i915_private *i915, int err);
 
 #else
 
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
 static inline struct i915_gpu_state *
 i915_first_error_state(struct drm_i915_private *i915)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void i915_reset_error_state(struct drm_i915_private *i915)
 {
 }
 
+static inline void i915_disable_error_state(struct drm_i915_private *i915,
+                                           int err)
+{
+}
+
 #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
 
 #endif /* _I915_GPU_ERROR_H_ */
index 2e242270e270865ca2f1cc0db2ebbf03dac11185..d447d7d508f483c62baecad23035a60702fd6a3c 100644 (file)
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
        return ret;
 }
 
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
+{
+       raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
+
+       /*
+        * Now with master disabled, get a sample of level indications
+        * for this interrupt. Indications will be cleared on related acks.
+        * New indications can and will light up during processing,
+        * and will generate a new interrupt after the master is re-enabled.
+        */
+       return raw_reg_read(regs, GEN8_MASTER_IRQ);
+}
+
+static inline void gen8_master_intr_enable(void __iomem * const regs)
+{
+       raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+}
+
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private *dev_priv = to_i915(arg);
+       void __iomem * const regs = dev_priv->regs;
        u32 master_ctl;
        u32 gt_iir[4];
 
        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;
 
-       master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
-       master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-       if (!master_ctl)
+       master_ctl = gen8_master_intr_disable(regs);
+       if (!master_ctl) {
+               gen8_master_intr_enable(regs);
                return IRQ_NONE;
-
-       I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+       }
 
        /* Find, clear, then process each source of interrupt */
        gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(dev_priv);
        }
 
-       I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+       gen8_master_intr_enable(regs);
 
        gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
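
A standalone sketch of the disable -> sample -> handle -> re-enable sequence these helpers factor out (toy register file in plain C, not the driver's mmio layer):

#include <stdint.h>
#include <stdio.h>

#define MASTER_IRQ      0
#define MASTER_IRQ_CTL  (1u << 31)          /* master enable bit */

static uint32_t regs[1];                    /* toy register file */

static uint32_t master_intr_disable(uint32_t *r)
{
        r[MASTER_IRQ] &= ~MASTER_IRQ_CTL;   /* gate the interrupt line */
        /* read back: samples the pending sources (and, on real hardware,
         * doubles as the posting read for the write above) */
        return r[MASTER_IRQ];
}

static void master_intr_enable(uint32_t *r)
{
        r[MASTER_IRQ] |= MASTER_IRQ_CTL;
}

static int irq_handler(void)
{
        uint32_t master_ctl = master_intr_disable(regs);

        if (!master_ctl) {
                master_intr_enable(regs);   /* spurious: undo the disable */
                return 0;                   /* IRQ_NONE */
        }

        printf("handling sources 0x%x\n", master_ctl);
        regs[MASTER_IRQ] = 0;               /* ack the sampled sources */
        master_intr_enable(regs);           /* new events re-raise the IRQ */
        return 1;                           /* IRQ_HANDLED */
}

int main(void)
{
        regs[MASTER_IRQ] = MASTER_IRQ_CTL | 0x5;    /* two sources pending */
        return !irq_handler();
}

The gen11_master_intr_disable()/enable() helpers below follow the same shape against GEN11_GFX_MSTR_IRQ. One behavioural detail of the refactor: the handler now disables the master before checking for sources, so the spurious path must explicitly re-enable it; and the readback inside the disable helper presumably takes over the old POSTING_READ()'s job in the reset and postinstall hunks further down.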
 
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
                intel_opregion_asle_intr(dev_priv);
 }
 
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
+{
+       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+       /*
+        * Now with master disabled, get a sample of level indications
+        * for this interrupt. Indications will be cleared on related acks.
+        * New indications can and will light up during processing,
+        * and will generate a new interrupt after the master is re-enabled.
+        */
+       return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+}
+
+static inline void gen11_master_intr_enable(void __iomem * const regs)
+{
+       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+}
+
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;
 
-       master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
-       master_ctl &= ~GEN11_MASTER_IRQ;
-       if (!master_ctl)
+       master_ctl = gen11_master_intr_disable(regs);
+       if (!master_ctl) {
+               gen11_master_intr_enable(regs);
                return IRQ_NONE;
-
-       /* Disable interrupts. */
-       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+       }
 
        /* Find, clear, then process each source of interrupt. */
        gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 
        gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
-       /* Acknowledge and enable interrupts. */
-       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+       gen11_master_intr_enable(regs);
 
        gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
-       I915_WRITE(GEN8_MASTER_IRQ, 0);
-       POSTING_READ(GEN8_MASTER_IRQ);
+       gen8_master_intr_disable(dev_priv->regs);
 
        gen8_gt_irq_reset(dev_priv);
 
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
-       I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
-       POSTING_READ(GEN11_GFX_MSTR_IRQ);
+       gen11_master_intr_disable(dev_priv->regs);
 
        gen11_gt_irq_reset(dev_priv);
 
        I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
 
+       I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+       I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
        for_each_pipe(dev_priv, pipe)
                if (intel_display_power_is_enabled(dev_priv,
                                                   POWER_DOMAIN_PIPE(pipe)))
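
The two added EDP_PSR writes use the usual IMR/IIR reset idiom: all ones to the interrupt mask register so nothing new asserts, then all ones to the identity register, whose bits are write-1-to-clear. A toy model of that register pair (semantics mocked in plain C):

#include <stdint.h>
#include <assert.h>

struct irq_pair { uint32_t imr, iir; };

static void write_iir(struct irq_pair *p, uint32_t v)
{
        p->iir &= ~v;               /* IIR bits are write-1-to-clear */
}

static void irq_pair_reset(struct irq_pair *p)
{
        p->imr = 0xffffffff;        /* mask every source */
        write_iir(p, 0xffffffff);   /* clear any latched indications */
}

int main(void)
{
        struct irq_pair psr = { .imr = 0, .iir = 0x3 };

        irq_pair_reset(&psr);
        assert(psr.iir == 0);
        return 0;
}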
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_postinstall(dev);
 
-       I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-       POSTING_READ(GEN8_MASTER_IRQ);
+       gen8_master_intr_enable(dev_priv->regs);
 
        return 0;
 }
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
 
        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
-       I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
-       POSTING_READ(GEN11_GFX_MSTR_IRQ);
+       gen11_master_intr_enable(dev_priv->regs);
 
        return 0;
 }
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev_priv->display_irqs_enabled = false;
 
        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+       /* If we have MST support, we want to avoid doing short HPD IRQ storm
+        * detection, as short HPD storms will occur as a natural part of
+        * sideband messaging with MST.
+        * On older platforms, however, IRQ storms can occur with both long and
+        * short pulses, as seen on some G4x systems.
+        */
+       dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
 
        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
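
A hypothetical consumer of the new hpd_short_storm_enabled flag, to show the intent (the detection routine below is illustrative; the driver's real logic lives in its hotplug code): with MST present, short pulses simply stop feeding the storm counter.

#include <stdbool.h>
#include <stdio.h>

struct hotplug {
        bool hpd_short_storm_enabled;
        int hpd_storm_threshold;
        int count;
};

static bool hpd_irq_storm_detect(struct hotplug *hp, bool long_pulse)
{
        if (!long_pulse && !hp->hpd_short_storm_enabled)
                return false;   /* MST sideband traffic: short pulses are normal */
        return ++hp->count > hp->hpd_storm_threshold;
}

int main(void)
{
        struct hotplug hp = { .hpd_short_storm_enabled = false,
                              .hpd_storm_threshold = 5 };

        printf("storm=%d\n", hpd_irq_storm_detect(&hp, false));
        return 0;
}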
index 4abd2e8b50839ee217fb6945461c13b5073cf212..4acdb94555b727d3a90072cc4ff80ee148410b29 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index b812d16162ac7816d50aeb69d2b8bf86639ebe5e..0e667f1a8aa19ea1b02d8847016871a9c5faa8b1 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_BDW_H__
index cb6f304ec16a3e5eaccdda5941979745f80b9873..a44195c399230f6ef39b0466ca1efbab7c783de6 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 690b963a23833550a1ed7f897d6c9c99eb26fd2f..679e92cf4f1dfde80cf002b77cf1402747df8ab3 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_BXT_H__
index 8641ae30e343a8a423d35a87e964c6f2894bcf00..7f60d51b87611e686dc22ea6c994992be96921d2 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 1f3268ef2ea2b142653c2943fa430d0a37d7a9d3..4d6025559bbe72596afc244f7e26fa443af59832 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_CFLGT2_H__
index 792facdb6702bffdcb808f8bc748518c8299ac8e..a92c38e3a0ce96e42f96c361941a1b424616140c 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index c13b5aac01b9b6b6475d3efde6835a1ac7de5952..0697f407740296e91b02177a4cebd581835a5102 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_CFLGT3_H__
index 556febb2c3c870142aa24d6f29fe15e126224209..71ec889a01145f998f1856bc3eb540442663094d 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index b9622496979ed88155dd9192a37c58f552b4aeb1..0986eae3135f87dd76e2a5306b563d687b837824 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_CHV_H__
index ba9140c87cc0ba7de03f0c36ef91ebcaf372b4c8..5c23d883d6c941247f7783498e1005951feef58f 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index fb918b1311058935c77fd97b79dc5e30439cfd5c..e830a406aff216157cdf7f6352728126fcad5456 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_CNL_H__
index 971db587957c6c306710983f33e17f7908bf6167..4bdda66df7d22bbb6cf2ec2a963b533a61dc0c08 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 63bd113f4bc9deb7e4a9a9bf1f6c9e4a035c293b..06dedf991edb3234746d88cb5f3e93b4753141c3 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_GLK_H__
index 434a9b96d7abdc44f4ecc02116e40536082b4f8c..cc6526fdd2bdc7e7b764285313b9cc13001224f3 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 74d03439c157ebc9c169b649f031564edecad847..3d0c870cd0bdbd5e832800c077c80c7a77da5c25 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_HSW_H__
index a5667926e3de88e1e7d431086628fbb399027c22..baa51427a543facad55c23105436e0512c26a4f0 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index ae1c24aafe4fccd720681695f0ed357fb8bdd046..24eaa97d61ba7629101d96f7090449d91602a3bc 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_ICL_H__
index 2fa98a40bbc84195daaa0f40c7ad47be0dd199f0..168e49ab0d4d7d6e9c5965caa21b356dc9bc0df5 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 25b803546dc1bcd5557f23ea134252172ee02ff3..a55398a904de4b272fe60faf505b949231eafb56 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_KBLGT2_H__
index f3cb6679a1bcfdb62f7d6e3a2742c57d1bb1a4da..6ffa553c388ed70ad1199772569587a445954da5 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index d5b5b5c1923ee1a6ed2faf8dc880b6f8c2cf37fd..3ddd3483b7ccc791abc7be3cb4a87d75c7103c9a 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_KBLGT3_H__
index bf8b8cd8a50d6696b6be196666666b320b26b39c..7ce6ee851d4315774ea7380dbab1d19a2df2f7af 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index fe1aa2c0395840ad8f9755022b29d60eaeac9254..be6256037239919ba2995c450a584c806c0c11d8 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT2_H__
index ae534c7c8135e133448d5b3e1cdb8ce0ab6ff309..086ca2631e1cfd87d730f694289d813c4cea11dd 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 06746b2616c81db701b2856c46ac20eb21c36bfd..650beb068e566e4ce885da6d0c590044bafd8ee8 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT3_H__
index 817fba2d82df36fc19a0b52ea06800adf6d87194..b291a6eb8a87b983b648bac0e135a34675bad36d 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
index 944fd525c8b15768ea00fb170d015119c88fae0c..8dcf849d131e0f33af7bfc7ebd37298f1cd529c8 100644 (file)
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT4_H__
index 295e981e4a398c242d10bd6ed446aeb0bfecdc70..2e0356561839d15016d72394d7960388545fb223 100644 (file)
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
        "WARNING: Disabling this can cause system wide hangs. "
        "(default: true)");
 
-i915_param_named_unsafe(enable_ppgtt, int, 0400,
-       "Override PPGTT usage. "
-       "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-
 i915_param_named_unsafe(enable_psr, int, 0600,
        "Enable PSR "
        "(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
 i915_param_named(enable_dpcd_backlight, bool, 0600,
        "Enable support for DPCD backlight control (default:false)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
        "Enable Intel GVT-g graphics virtualization host support (default: false)");
+#endif
 
 static __always_inline void _print_param(struct drm_printer *p,
                                         const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
        else if (!__builtin_strcmp(type, "char *"))
                drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
        else
-               BUILD_BUG();
+               WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+                         type, name);
 }
 
 /**
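
For context on the hunk above: _print_param() dispatches on a stringified type name; in the driver the comparisons use __builtin_strcmp so they fold at compile time and only one branch survives per parameter, which is why an unknown type used to be a BUILD_BUG(). Swapping in WARN_ONCE() turns that into a runtime nag instead of a build failure. A standalone model of the dispatch (plain strcmp, made-up parameters):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void print_param(const char *type, const char *name, const void *x)
{
        if (!strcmp(type, "bool"))
                printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
        else if (!strcmp(type, "int"))
                printf("i915.%s=%d\n", name, *(const int *)x);
        else if (!strcmp(type, "char *"))
                printf("i915.%s=%s\n", name, *(const char *const *)x);
        else
                fprintf(stderr, "no printer for param type %s (i915.%s)\n",
                        type, name);
}

/* the macro hands the stringified type and name to the printer */
#define PRINT_PARAM(T, name, v) do { T __v = (v); \
        print_param(#T, #name, &__v); } while (0)

int main(void)
{
        PRINT_PARAM(int, enable_psr, -1);
        PRINT_PARAM(bool, enable_hangcheck, true);
        return 0;
}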
index 6c4d4a21474b5ffaa9954a145068b1d3effce694..7e56c516c815c269230c697da6166a7fae455fd8 100644 (file)
@@ -41,7 +41,6 @@ struct drm_printer;
        param(int, vbt_sdvo_panel_type, -1) \
        param(int, enable_dc, -1) \
        param(int, enable_fbc, -1) \
-       param(int, enable_ppgtt, -1) \
        param(int, enable_psr, -1) \
        param(int, disable_power_well, -1) \
        param(int, enable_ips, 1) \
index d6f7b9fe1d261fcaa4630f88659427deca873bf8..1b81d7cb209e02a6c10f82e091a2e2528061d468 100644 (file)
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
 
 #define GEN_DEFAULT_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+       .pipe_offsets = { \
+               [TRANSCODER_A] = PIPE_A_OFFSET, \
+               [TRANSCODER_B] = PIPE_B_OFFSET, \
+               [TRANSCODER_C] = PIPE_C_OFFSET, \
+               [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+       }, \
+       .trans_offsets = { \
+               [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+               [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+               [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+               [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+       }
 
 #define GEN_CHV_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         CHV_PIPE_C_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          CHV_TRANSCODER_C_OFFSET, }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-                            CHV_PALETTE_C_OFFSET }
+       .pipe_offsets = { \
+               [TRANSCODER_A] = PIPE_A_OFFSET, \
+               [TRANSCODER_B] = PIPE_B_OFFSET, \
+               [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+       }, \
+       .trans_offsets = { \
+               [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+               [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+               [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+       }
 
 #define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
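
The offset tables above switch from positional to designated initializers, which pins each offset to its transcoder index instead of relying on enum order and leaves unlisted slots zeroed; that matters once the DSI transcoders join the enum in the gen11 hunk further down. A minimal illustration (offset values are examples, not hardware offsets):

#include <assert.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP,
                  TRANSCODER_DSI_0, TRANSCODER_DSI_1, I915_MAX_TRANSCODERS };

static const unsigned int pipe_offsets[I915_MAX_TRANSCODERS] = {
        [TRANSCODER_A]   = 0x70000,     /* example values only */
        [TRANSCODER_B]   = 0x71000,
        [TRANSCODER_EDP] = 0x7f000,     /* TRANSCODER_C left out: stays 0 */
};

int main(void)
{
        assert(pipe_offsets[TRANSCODER_C] == 0);
        assert(pipe_offsets[TRANSCODER_EDP] == 0x7f000);
        return 0;
}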
@@ -252,7 +263,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
        .has_llc = 1, \
        .has_rc6 = 1, \
        .has_rc6p = 1, \
-       .has_aliasing_ppgtt = 1, \
+       .ppgtt = INTEL_PPGTT_ALIASING, \
        GEN_DEFAULT_PIPEOFFSETS, \
        GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
@@ -297,8 +308,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
        .has_llc = 1, \
        .has_rc6 = 1, \
        .has_rc6p = 1, \
-       .has_aliasing_ppgtt = 1, \
-       .has_full_ppgtt = 1, \
+       .ppgtt = INTEL_PPGTT_FULL, \
        GEN_DEFAULT_PIPEOFFSETS, \
        GEN_DEFAULT_PAGE_SIZES, \
        IVB_CURSOR_OFFSETS
@@ -351,8 +361,7 @@ static const struct intel_device_info intel_valleyview_info = {
        .has_rc6 = 1,
        .has_gmch_display = 1,
        .has_hotplug = 1,
-       .has_aliasing_ppgtt = 1,
-       .has_full_ppgtt = 1,
+       .ppgtt = INTEL_PPGTT_FULL,
        .has_snoop = true,
        .has_coherent_ggtt = false,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -399,7 +408,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
        .page_sizes = I915_GTT_PAGE_SIZE_4K | \
                      I915_GTT_PAGE_SIZE_2M, \
        .has_logical_ring_contexts = 1, \
-       .has_full_48bit_ppgtt = 1, \
+       .ppgtt = INTEL_PPGTT_FULL_4LVL, \
        .has_64bit_reloc = 1, \
        .has_reset_engine = 1
 
@@ -443,8 +452,7 @@ static const struct intel_device_info intel_cherryview_info = {
        .has_rc6 = 1,
        .has_logical_ring_contexts = 1,
        .has_gmch_display = 1,
-       .has_aliasing_ppgtt = 1,
-       .has_full_ppgtt = 1,
+       .ppgtt = INTEL_PPGTT_FULL,
        .has_reset_engine = 1,
        .has_snoop = true,
        .has_coherent_ggtt = false,
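
Across these hunks the scattered has_aliasing/full/full_48bit_ppgtt flags collapse into a single .ppgtt level, which also explains the removal of the enable_ppgtt modparam earlier in this series: the mode now comes straight from device info. A plausible shape for the enum and its capability checks (the INTEL_PPGTT_* names match the diff; the macros are assumptions):

#include <assert.h>

enum intel_ppgtt {
        INTEL_PPGTT_NONE = 0,
        INTEL_PPGTT_ALIASING,
        INTEL_PPGTT_FULL,
        INTEL_PPGTT_FULL_4LVL,
};

/* capability checks become ordered comparisons instead of flag tests */
#define HAS_PPGTT(p)      ((p) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(p) ((p) >= INTEL_PPGTT_FULL)

int main(void)
{
        enum intel_ppgtt vlv = INTEL_PPGTT_FULL;
        enum intel_ppgtt snb = INTEL_PPGTT_ALIASING;

        assert(HAS_PPGTT(snb));
        assert(HAS_FULL_PPGTT(vlv) && !HAS_FULL_PPGTT(snb));
        return 0;
}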
@@ -472,6 +480,8 @@ static const struct intel_device_info intel_cherryview_info = {
 
 #define SKL_PLATFORM \
        GEN9_FEATURES, \
+       /* Display WA #0477 WaDisableIPC: skl */ \
+       .has_ipc = 0, \
        PLATFORM(INTEL_SKYLAKE)
 
 static const struct intel_device_info intel_skylake_gt1_info = {
@@ -518,9 +528,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
        .has_logical_ring_contexts = 1, \
        .has_logical_ring_preemption = 1, \
        .has_guc = 1, \
-       .has_aliasing_ppgtt = 1, \
-       .has_full_ppgtt = 1, \
-       .has_full_48bit_ppgtt = 1, \
+       .ppgtt = INTEL_PPGTT_FULL_4LVL, \
        .has_reset_engine = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
@@ -598,6 +606,22 @@ static const struct intel_device_info intel_cannonlake_info = {
 
 #define GEN11_FEATURES \
        GEN10_FEATURES, \
+       .pipe_offsets = { \
+               [TRANSCODER_A] = PIPE_A_OFFSET, \
+               [TRANSCODER_B] = PIPE_B_OFFSET, \
+               [TRANSCODER_C] = PIPE_C_OFFSET, \
+               [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+               [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+               [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+       }, \
+       .trans_offsets = { \
+               [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+               [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+               [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+               [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+               [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+               [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+       }, \
        GEN(11), \
        .ddb_size = 2048, \
        .has_logical_ring_elsq = 1
@@ -663,7 +687,7 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
        INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-       INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+       INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +695,7 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
        INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+       INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
        INTEL_CNL_IDS(&intel_cannonlake_info),
        INTEL_ICL_11_IDS(&intel_icelake_11_info),
index 664b96bb65a38d08ac99b2e16dc350c1f511fd47..4529edfdcfc80580ea66b306713a42250e1f2626 100644 (file)
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);
 
-               dev_priv->perf.oa.ops.oa_disable(dev_priv);
-               dev_priv->perf.oa.ops.oa_enable(dev_priv);
+               dev_priv->perf.oa.ops.oa_disable(stream);
+               dev_priv->perf.oa.ops.oa_enable(stream);
 
                /*
                 * Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);
 
-               dev_priv->perf.oa.ops.oa_disable(dev_priv);
-               dev_priv->perf.oa.ops.oa_enable(dev_priv);
+               dev_priv->perf.oa.ops.oa_disable(stream);
+               dev_priv->perf.oa.ops.oa_enable(stream);
 
                oastatus1 = I915_READ(GEN7_OASTATUS1);
        }
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
                goto err_unpin;
        }
 
-       dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
        DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
                         i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
                         dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
        }
 }
 
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
-                                const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+       const struct i915_oa_config *oa_config = stream->oa_config;
+
        /* PRM:
         *
         * OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
        return 0;
 }
 
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
-                                 const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+       const struct i915_oa_config *oa_config = stream->oa_config;
        int ret;
 
        /*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
                   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
 }
 
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
 {
-       struct i915_gem_context *ctx =
-                       dev_priv->perf.oa.exclusive_stream->ctx;
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct i915_gem_context *ctx = stream->ctx;
        u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
        bool periodic = dev_priv->perf.oa.periodic;
        u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
                   GEN7_OACONTROL_ENABLE);
 }
 
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
 {
+       struct drm_i915_private *dev_priv = stream->dev_priv;
        u32 report_format = dev_priv->perf.oa.oa_buffer.format;
 
        /*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
        struct drm_i915_private *dev_priv = stream->dev_priv;
 
-       dev_priv->perf.oa.ops.oa_enable(dev_priv);
+       dev_priv->perf.oa.ops.oa_enable(stream);
 
        if (dev_priv->perf.oa.periodic)
                hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
                              HRTIMER_MODE_REL_PINNED);
 }
 
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
 {
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+
        I915_WRITE(GEN7_OACONTROL, 0);
        if (intel_wait_for_register(dev_priv,
                                    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
                DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
 {
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+
        I915_WRITE(GEN8_OACONTROL, 0);
        if (intel_wait_for_register(dev_priv,
                                    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
        struct drm_i915_private *dev_priv = stream->dev_priv;
 
-       dev_priv->perf.oa.ops.oa_disable(dev_priv);
+       dev_priv->perf.oa.ops.oa_disable(stream);
 
        if (dev_priv->perf.oa.periodic)
                hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
                return -EINVAL;
        }
 
-       if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+       if (!dev_priv->perf.oa.ops.enable_metric_set) {
                DRM_DEBUG("OA unit not supported\n");
                return -ENODEV;
        }
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
        if (ret)
                goto err_lock;
 
-       ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
-                                                     stream->oa_config);
+       ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
        if (ret) {
                DRM_DEBUG("Unable to enable metric set\n");
                goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                dev_priv->perf.oa.ops.is_valid_mux_reg =
                        hsw_is_valid_mux_addr;
                dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
-               dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
                dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
                dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
                dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                 */
                dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
 
-               dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
                dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
                dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
                dev_priv->perf.oa.ops.read = gen8_oa_read;
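
Throughout this file the low-level OA hooks switch from taking dev_priv to taking the stream, so each callback derives the device and its per-stream state (ctx, oa_config) locally instead of reaching through perf.oa.exclusive_stream. A minimal sketch of the reshaped ops table, using only signatures visible in the hunks above:

    struct i915_perf_stream;

    /* Sketch: every hook is now keyed on the stream it operates on. */
    struct i915_oa_ops_sketch {
            int  (*enable_metric_set)(struct i915_perf_stream *stream);
            void (*oa_enable)(struct i915_perf_stream *stream);
            void (*oa_disable)(struct i915_perf_stream *stream);
    };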
index 3f502eef243166612321dd7cc219374f1daae43b..6fc4b8eeab428f7b10bf56ba5c07eba5789b59a7 100644 (file)
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
 
        slice_length = sizeof(sseu->slice_mask);
        subslice_length = sseu->max_slices *
-               DIV_ROUND_UP(sseu->max_subslices,
-                            sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+               DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
        eu_length = sseu->max_slices * sseu->max_subslices *
                DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
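
The subslice mask length is just "bytes per slice" times the slice count; the sizeof() factor in the old expression was redundant once the mask is stored byte-wise. A self-contained check of the arithmetic (slice/subslice counts are illustrative):

    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int max_slices = 3, max_subslices = 8;
            /* 8 subslices fit in one mask byte, so 3 slices -> 3 bytes */
            int subslice_length = max_slices *
                    DIV_ROUND_UP(max_subslices, BITS_PER_BYTE);
            printf("subslice_length = %d\n", subslice_length); /* prints 3 */
            return 0;
    }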
 
index 7c491ea3d052aaccfc5eab69e2ea8f6b31f6813e..47baf2fe8f71cf1bf0265a214384cb44c254661a 100644 (file)
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /*
  * Named helper wrappers around _PICK_EVEN() and _PICK().
  */
-#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PIPE(pipe, a, b)              _PICK_EVEN(pipe, a, b)
+#define _PLANE(plane, a, b)            _PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b)             _PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b)              _PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b)                        _PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b)         _MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b)       _MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b)                _MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b)         _MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b)           _MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...)                        _PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c)     _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c)     _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c)       _MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg)         _MMIO(dev_priv->info.pipe_offsets[pipe] - \
+                                             dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+                                             dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg)                _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+                                             dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+                                             dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg)            _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+                                             dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+                                             dev_priv->info.display_mmio_offset)
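
The relocated helpers rebase a pipe-A register against the device-info offset table: the target pipe's base, minus pipe A's base, plus the pipe-A register and the display MMIO offset. A standalone sketch of the arithmetic, assuming the usual 0x70000/0x71000/0x72000 pipe bases and a zero display_mmio_offset:

    #include <stdio.h>
    #include <stdint.h>

    static const uint32_t pipe_offsets[] = { 0x70000, 0x71000, 0x72000 };
    static const uint32_t display_mmio_offset = 0; /* nonzero on VLV/CHV */

    static uint32_t mmio_pipe2(int pipe, uint32_t reg_a)
    {
            /* rebase the pipe-A register onto the requested pipe's block */
            return pipe_offsets[pipe] - pipe_offsets[0] + reg_a +
                   display_mmio_offset;
    }

    int main(void)
    {
            printf("0x%x\n", mmio_pipe2(1, 0x70008)); /* pipe B: 0x71008 */
            return 0;
    }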
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({                                     \
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
 #define   PHY_RESERVED                 (1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy)                _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
 
-#define CNL_PORT_CL1CM_DW5             _MMIO(0x162014)
-#define   CL_POWER_DOWN_ENABLE         (1 << 4)
-#define   SUS_CLOCK_CONFIG             (3 << 0)
-
-#define _ICL_PORT_CL_DW5_A     0x162014
-#define _ICL_PORT_CL_DW5_B     0x6C014
-#define ICL_PORT_CL_DW5(port)  _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
-                                                _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A            0x162028
-#define _ICL_PORT_CL_DW10_B            0x6c028
-#define ICL_PORT_CL_DW10(port)         _MMIO_PORT(port,        \
-                                                  _CNL_PORT_CL_DW10_A, \
-                                                  _ICL_PORT_CL_DW10_B)
-#define  PG_SEQ_DELAY_OVERRIDE_MASK    (3 << 25)
-#define  PG_SEQ_DELAY_OVERRIDE_SHIFT   25
-#define  PG_SEQ_DELAY_OVERRIDE_ENABLE  (1 << 24)
-#define  PWR_UP_ALL_LANES              (0x0 << 4)
-#define  PWR_DOWN_LN_3_2_1             (0xe << 4)
-#define  PWR_DOWN_LN_3_2               (0xc << 4)
-#define  PWR_DOWN_LN_3                 (0x8 << 4)
-#define  PWR_DOWN_LN_2_1_0             (0x7 << 4)
-#define  PWR_DOWN_LN_1_0               (0x3 << 4)
-#define  PWR_DOWN_LN_1                 (0x2 << 4)
-#define  PWR_DOWN_LN_3_1               (0xa << 4)
-#define  PWR_DOWN_LN_3_1_0             (0xb << 4)
-#define  PWR_DOWN_LN_MASK              (0xf << 4)
-#define  PWR_DOWN_LN_SHIFT             4
-
 #define _PORT_CL1CM_DW9_A              0x162024
 #define _PORT_CL1CM_DW9_BC             0x6C024
 #define   IREF0RC_OFFSET_SHIFT         8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
 #define   IREF1RC_OFFSET_MASK          (0xFF << IREF1RC_OFFSET_SHIFT)
 #define BXT_PORT_CL1CM_DW10(phy)       _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
 
-#define _ICL_PORT_CL_DW12_A            0x162030
-#define _ICL_PORT_CL_DW12_B            0x6C030
-#define   ICL_LANE_ENABLE_AUX          (1 << 0)
-#define ICL_PORT_CL_DW12(port)         _MMIO_PORT((port),              \
-                                                  _ICL_PORT_CL_DW12_A, \
-                                                  _ICL_PORT_CL_DW12_B)
-
 #define _PORT_CL1CM_DW28_A             0x162070
 #define _PORT_CL1CM_DW28_BC            0x6C070
 #define   OCL1_POWER_DOWN_EN           (1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
 #define   OCL2_LDOFUSE_PWR_DIS         (1 << 6)
 #define BXT_PORT_CL1CM_DW30(phy)       _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
 
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A                        0x162000
+#define _ICL_COMBOPHY_B                        0x6C000
+#define _ICL_COMBOPHY(port)            _PICK(port, _ICL_COMBOPHY_A, \
+                                             _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port)      (_ICL_COMBOPHY(port) + \
+                                        4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5             _MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port)          _MMIO(_ICL_PORT_CL_DW(5, port))
+#define   CL_POWER_DOWN_ENABLE         (1 << 4)
+#define   SUS_CLOCK_CONFIG             (3 << 0)
+
+#define ICL_PORT_CL_DW10(port)         _MMIO(_ICL_PORT_CL_DW(10, port))
+#define  PG_SEQ_DELAY_OVERRIDE_MASK    (3 << 25)
+#define  PG_SEQ_DELAY_OVERRIDE_SHIFT   25
+#define  PG_SEQ_DELAY_OVERRIDE_ENABLE  (1 << 24)
+#define  PWR_UP_ALL_LANES              (0x0 << 4)
+#define  PWR_DOWN_LN_3_2_1             (0xe << 4)
+#define  PWR_DOWN_LN_3_2               (0xc << 4)
+#define  PWR_DOWN_LN_3                 (0x8 << 4)
+#define  PWR_DOWN_LN_2_1_0             (0x7 << 4)
+#define  PWR_DOWN_LN_1_0               (0x3 << 4)
+#define  PWR_DOWN_LN_1                 (0x2 << 4)
+#define  PWR_DOWN_LN_3_1               (0xa << 4)
+#define  PWR_DOWN_LN_3_1_0             (0xb << 4)
+#define  PWR_DOWN_LN_MASK              (0xf << 4)
+#define  PWR_DOWN_LN_SHIFT             4
+
+#define ICL_PORT_CL_DW12(port)         _MMIO(_ICL_PORT_CL_DW(12, port))
+#define   ICL_LANE_ENABLE_AUX          (1 << 0)
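
Rather than one #define per port, every CNL/ICL combo-PHY register is now derived as port base plus 4 * dword index. A quick standalone check that the derivation reproduces the literal it replaces:

    #include <stdio.h>
    #include <stdint.h>

    static const uint32_t combophy[] = { 0x162000 /* A */, 0x6C000 /* B */ };

    static uint32_t icl_port_cl_dw(int dw, int port)
    {
            return combophy[port] + 4 * dw;
    }

    int main(void)
    {
            /* DW5 on port B: 0x6C000 + 4 * 5 = 0x6C014, matching the
             * _ICL_PORT_CL_DW5_B literal removed in an earlier hunk. */
            printf("0x%x\n", icl_port_cl_dw(5, 1));
            return 0;
    }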
+
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP                 0x100
+#define _ICL_PORT_COMP_DW(dw, port)    (_ICL_COMBOPHY(port) + \
+                                        _ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0              _MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port)                _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define   COMP_INIT                    (1 << 31)
+
+#define CNL_PORT_COMP_DW1              _MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port)                _MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3              _MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port)                _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define   PROCESS_INFO_DOT_0           (0 << 26)
+#define   PROCESS_INFO_DOT_1           (1 << 26)
+#define   PROCESS_INFO_DOT_4           (2 << 26)
+#define   PROCESS_INFO_MASK            (7 << 26)
+#define   PROCESS_INFO_SHIFT           26
+#define   VOLTAGE_INFO_0_85V           (0 << 24)
+#define   VOLTAGE_INFO_0_95V           (1 << 24)
+#define   VOLTAGE_INFO_1_05V           (2 << 24)
+#define   VOLTAGE_INFO_MASK            (3 << 24)
+#define   VOLTAGE_INFO_SHIFT           24
+
+#define CNL_PORT_COMP_DW9              _MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port)                _MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10             _MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port)       _MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
 #define _CNL_PORT_PCS_DW1_GRP_AE       0x162304
 #define _CNL_PORT_PCS_DW1_GRP_B                0x162384
 #define _CNL_PORT_PCS_DW1_GRP_C                0x162B04
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
                                                    _CNL_PORT_PCS_DW1_GRP_D, \
                                                    _CNL_PORT_PCS_DW1_GRP_AE, \
                                                    _CNL_PORT_PCS_DW1_GRP_F))
-
 #define CNL_PORT_PCS_DW1_LN0(port)     _MMIO(_PICK(port, \
                                                    _CNL_PORT_PCS_DW1_LN0_AE, \
                                                    _CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
                                                    _CNL_PORT_PCS_DW1_LN0_AE, \
                                                    _CNL_PORT_PCS_DW1_LN0_F))
 
-#define _ICL_PORT_PCS_DW1_GRP_A                0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B                0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A                0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B                0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A                0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B                0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port)     _MMIO_PORT(port,\
-                                                  _ICL_PORT_PCS_DW1_GRP_A, \
-                                                  _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port)     _MMIO_PORT(port, \
-                                                  _ICL_PORT_PCS_DW1_LN0_A, \
-                                                  _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port)     _MMIO_PORT(port, \
-                                                  _ICL_PORT_PCS_DW1_AUX_A, \
-                                                  _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX              0x300
+#define _ICL_PORT_PCS_GRP              0x600
+#define _ICL_PORT_PCS_LN(ln)           (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+                                        _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+                                        _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+                                         _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port)     _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port)     _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port)     _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
 #define   COMMON_KEEPER_EN             (1 << 26)
 
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
 #define _CNL_PORT_TX_AE_GRP_OFFSET             0x162340
 #define _CNL_PORT_TX_B_GRP_OFFSET              0x1623C0
 #define _CNL_PORT_TX_C_GRP_OFFSET              0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_F_LN0_OFFSET) + \
                                               4 * (dw))
 
-#define CNL_PORT_TX_DW2_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A         0x162688
-#define _ICL_PORT_TX_DW2_GRP_B         0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A         0x162888
-#define _ICL_PORT_TX_DW2_LN0_B         0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A         0x162388
-#define _ICL_PORT_TX_DW2_AUX_B         0x6c388
-#define ICL_PORT_TX_DW2_GRP(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW2_GRP_A, \
-                                                  _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW2_LN0_A, \
-                                                  _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW2_AUX_A, \
-                                                  _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX               0x380
+#define _ICL_PORT_TX_GRP               0x680
+#define _ICL_PORT_TX_LN(ln)            (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port)  (_ICL_COMBOPHY(port) + \
+                                        _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port)  (_ICL_COMBOPHY(port) + \
+                                        _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+                                         _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
 #define   SWING_SEL_UPPER(x)           (((x) >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK         (1 << 15)
 #define   SWING_SEL_LOWER(x)           (((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
                                           ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                    _CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A         0x162690
-#define _ICL_PORT_TX_DW4_GRP_B         0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A         0x162890
-#define _ICL_PORT_TX_DW4_LN1_A         0x162990
-#define _ICL_PORT_TX_DW4_LN0_B         0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A         0x162390
-#define _ICL_PORT_TX_DW4_AUX_B         0x6c390
-#define ICL_PORT_TX_DW4_GRP(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW4_GRP_A, \
-                                                  _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln)   _MMIO(_PORT(port, \
-                                                  _ICL_PORT_TX_DW4_LN0_A, \
-                                                  _ICL_PORT_TX_DW4_LN0_B) + \
-                                            ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
-                                                     _ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW4_AUX_A, \
-                                                  _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln)   _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
 #define   LOADGEN_SELECT               (1 << 31)
 #define   POST_CURSOR_1(x)             ((x) << 12)
 #define   POST_CURSOR_1_MASK           (0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
 #define   CURSOR_COEFF(x)              ((x) << 0)
 #define   CURSOR_COEFF_MASK            (0x3F << 0)
 
-#define CNL_PORT_TX_DW5_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A         0x162694
-#define _ICL_PORT_TX_DW5_GRP_B         0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A         0x162894
-#define _ICL_PORT_TX_DW5_LN0_B         0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A         0x162394
-#define _ICL_PORT_TX_DW5_AUX_B         0x6c394
-#define ICL_PORT_TX_DW5_GRP(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW5_GRP_A, \
-                                                  _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW5_LN0_A, \
-                                                  _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port)      _MMIO_PORT(port, \
-                                                  _ICL_PORT_TX_DW5_AUX_A, \
-                                                  _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
 #define   TX_TRAINING_EN               (1 << 31)
 #define   TAP2_DISABLE                 (1 << 30)
 #define   TAP3_DISABLE                 (1 << 29)
@@ -2054,49 +2072,16 @@ enum i915_power_well_id {
 #define BXT_PORT_CL2CM_DW6(phy)                _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
 #define   DW6_OLDO_DYN_PWR_DOWN_EN     (1 << 28)
 
-#define CNL_PORT_COMP_DW0              _MMIO(0x162100)
-#define   COMP_INIT                    (1 << 31)
-#define CNL_PORT_COMP_DW1              _MMIO(0x162104)
-#define CNL_PORT_COMP_DW3              _MMIO(0x16210c)
-#define   PROCESS_INFO_DOT_0           (0 << 26)
-#define   PROCESS_INFO_DOT_1           (1 << 26)
-#define   PROCESS_INFO_DOT_4           (2 << 26)
-#define   PROCESS_INFO_MASK            (7 << 26)
-#define   PROCESS_INFO_SHIFT           26
-#define   VOLTAGE_INFO_0_85V           (0 << 24)
-#define   VOLTAGE_INFO_0_95V           (1 << 24)
-#define   VOLTAGE_INFO_1_05V           (2 << 24)
-#define   VOLTAGE_INFO_MASK            (3 << 24)
-#define   VOLTAGE_INFO_SHIFT           24
-#define CNL_PORT_COMP_DW9              _MMIO(0x162124)
-#define CNL_PORT_COMP_DW10             _MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A           0x162100
-#define _ICL_PORT_COMP_DW0_B           0x6C100
-#define ICL_PORT_COMP_DW0(port)                _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
-                                                        _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A           0x162104
-#define _ICL_PORT_COMP_DW1_B           0x6C104
-#define ICL_PORT_COMP_DW1(port)                _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
-                                                        _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A           0x16210C
-#define _ICL_PORT_COMP_DW3_B           0x6C10C
-#define ICL_PORT_COMP_DW3(port)                _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
-                                                        _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A           0x162124
-#define _ICL_PORT_COMP_DW9_B           0x6C124
-#define ICL_PORT_COMP_DW9(port)                _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
-                                                        _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A          0x162128
-#define _ICL_PORT_COMP_DW10_B          0x6C128
-#define ICL_PORT_COMP_DW10(port)       _MMIO_PORT(port, \
-                                                  _ICL_PORT_COMP_DW10_A, \
-                                                  _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE                      0x163000
 
 /* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1            _MMIO(0x1638C0)
-#define   DFLEXDPMLE1_DPMLETC_MASK(n)  (0xf << (4 * (n)))
-#define   DFLEXDPMLE1_DPMLETC(n, x)    ((x) << (4 * (n)))
+#define PORT_TX_DFLEXDPMLE1            _MMIO(FIA1_BASE + 0x008C0)
+#define   DFLEXDPMLE1_DPMLETC_MASK(tc_port)    (0xf << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML0(tc_port)     (1 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML1_0(tc_port)   (3 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3(tc_port)     (8 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3_2(tc_port)   (12 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3_0(tc_port)   (15 << (4 * (tc_port)))
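
Each type-C port owns a 4-bit lane field in DFLEXDPMLE1, and the new helpers spell out the lane patterns by value (1 = ML0 only, 3 = ML1..0, 15 = all four lanes). For example:

    /* tc_port 2, lanes ML1..0: DFLEXDPMLE1_DPMLETC_ML1_0(2) = 3 << 8 = 0x300,
     * inside the field DFLEXDPMLE1_DPMLETC_MASK(2) = 0xf << 8.
     */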
 
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A                        0x16218C
@@ -2413,6 +2398,7 @@ enum i915_power_well_id {
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE      (1 << 7)
 
 #define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_L3_COH_PIPE                        (1 << 31)
@@ -2573,6 +2559,7 @@ enum i915_power_well_id {
 /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
 #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
 #define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define   GEN11_ENABLE_32_PLANE_MODE (1 << 7)
 
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK         _MMIO(0x20F0)
@@ -3475,11 +3462,13 @@ enum i915_power_well_id {
 /*
  * Palette regs
  */
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] +  \
-                             dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A             0xa000
+#define _PALETTE_B             0xa800
+#define _CHV_PALETTE_C         0xc000
+#define PALETTE(pipe, i)       _MMIO(dev_priv->info.display_mmio_offset + \
+                                     _PICK((pipe), _PALETTE_A,         \
+                                           _PALETTE_B, _CHV_PALETTE_C) + \
+                                     (i) * 4)
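
PALETTE() now selects among the three bases with _PICK(), which indexes an inline array of its trailing arguments; entry i of a pipe's palette then sits at base + 4 * i. A sketch, with _PICK's shape assumed from its uses elsewhere in this file:

    /* Assumed shape: index into a compound-literal array of candidates. */
    #define _PICK(idx, ...) (((const unsigned int []){ __VA_ARGS__ })[idx])

    /* e.g. CHV pipe C, palette entry 2 -> 0xc000 + 2 * 4 = 0xc008 */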
 
 /* MCH MMIO space */
 
@@ -4061,15 +4050,27 @@ enum {
 #define _VSYNCSHIFT_B  0x61028
 #define _PIPE_MULT_B   0x6102c
 
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0           0x6b000
+#define _HSYNC_DSI0            0x6b008
+#define _VTOTAL_DSI0           0x6b00c
+#define _VSYNC_DSI0            0x6b014
+#define _VSYNCSHIFT_DSI0       0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1           0x6b800
+#define _HSYNC_DSI1            0x6b808
+#define _VTOTAL_DSI1           0x6b80c
+#define _VSYNC_DSI1            0x6b814
+#define _VSYNCSHIFT_DSI1       0x6b828
+
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
 #define TRANSCODER_C_OFFSET 0x62000
 #define CHV_TRANSCODER_C_OFFSET 0x63000
 #define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-       dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-       dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET 0x6b000
+#define TRANSCODER_DSI1_OFFSET 0x6b800
 
 #define HTOTAL(trans)          _MMIO_TRANS2(trans, _HTOTAL_A)
 #define HBLANK(trans)          _MMIO_TRANS2(trans, _HBLANK_A)
@@ -4149,9 +4150,13 @@ enum {
 /* Bspec claims those aren't shifted but stay at 0x64800 */
 #define EDP_PSR_IMR                            _MMIO(0x64834)
 #define EDP_PSR_IIR                            _MMIO(0x64838)
-#define   EDP_PSR_ERROR(trans)                 (1 << (((trans) * 8 + 10) & 31))
-#define   EDP_PSR_POST_EXIT(trans)             (1 << (((trans) * 8 + 9) & 31))
-#define   EDP_PSR_PRE_ENTRY(trans)             (1 << (((trans) * 8 + 8) & 31))
+#define   EDP_PSR_ERROR(shift)                 (1 << ((shift) + 2))
+#define   EDP_PSR_POST_EXIT(shift)             (1 << ((shift) + 1))
+#define   EDP_PSR_PRE_ENTRY(shift)             (1 << (shift))
+#define   EDP_PSR_TRANSCODER_C_SHIFT           24
+#define   EDP_PSR_TRANSCODER_B_SHIFT           16
+#define   EDP_PSR_TRANSCODER_A_SHIFT           8
+#define   EDP_PSR_TRANSCODER_EDP_SHIFT         0
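
The rewritten IIR/IMR bit helpers take an explicit per-transcoder shift instead of computing it from the transcoder index, but the resulting bits are unchanged. For example (assuming TRANSCODER_B is enum value 1):

    /* New: EDP_PSR_ERROR(EDP_PSR_TRANSCODER_B_SHIFT) = 1 << (16 + 2) = bit 18
     * Old: 1 << ((1 * 8 + 10) & 31)                  = bit 18 as well;
     * the EDP transcoder's bits 2..0 come from shift 0, matching the old
     * wraparound through "& 31".
     */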
 
 #define EDP_PSR_AUX_CTL                                _MMIO(dev_priv->psr_mmio_base + 0x10)
 #define   EDP_PSR_AUX_CTL_TIME_OUT_MASK                (3 << 26)
@@ -4195,7 +4200,7 @@ enum {
 #define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
 #define   EDP_PSR_DEBUG_MASK_HPD               (1 << 25)
-#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16) /* Reserved in ICL+ */
 #define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
 
 #define EDP_PSR2_CTL                   _MMIO(0x6f900)
@@ -4232,7 +4237,7 @@ enum {
 #define  PSR_EVENT_FRONT_BUFFER_MODIFY         (1 << 9)
 #define  PSR_EVENT_WD_TIMER_EXPIRE             (1 << 8)
 #define  PSR_EVENT_PIPE_REGISTERS_UPDATE       (1 << 6)
-#define  PSR_EVENT_REGISTER_UPDATE             (1 << 5)
+#define  PSR_EVENT_REGISTER_UPDATE             (1 << 5) /* Reserved in ICL+ */
 #define  PSR_EVENT_HDCP_ENABLE                 (1 << 4)
 #define  PSR_EVENT_KVMR_SESSION_ENABLE         (1 << 3)
 #define  PSR_EVENT_VBI_ENABLE                  (1 << 2)
@@ -4584,6 +4589,15 @@ enum {
 #define   VIDEO_DIP_FREQ_2VSYNC                (2 << 16)
 #define   VIDEO_DIP_FREQ_MASK          (3 << 16)
 /* HSW and later: */
+#define   DRM_DIP_ENABLE               (1 << 28)
+#define   PSR_VSC_BIT_7_SET            (1 << 27)
+#define   VSC_SELECT_MASK              (0x3 << 25)
+#define   VSC_SELECT_SHIFT             25
+#define   VSC_DIP_HW_HEA_DATA          (0 << 25)
+#define   VSC_DIP_HW_HEA_SW_DATA       (1 << 25)
+#define   VSC_DIP_HW_DATA_SW_HEA       (2 << 25)
+#define   VSC_DIP_SW_HEA_DATA          (3 << 25)
+#define   VDIP_ENABLE_PPS              (1 << 24)
 #define   VIDEO_DIP_ENABLE_VSC_HSW     (1 << 20)
 #define   VIDEO_DIP_ENABLE_GCP_HSW     (1 << 16)
 #define   VIDEO_DIP_ENABLE_AVI_HSW     (1 << 12)
@@ -4591,16 +4605,6 @@ enum {
 #define   VIDEO_DIP_ENABLE_GMP_HSW     (1 << 4)
 #define   VIDEO_DIP_ENABLE_SPD_HSW     (1 << 0)
 
-#define  DRM_DIP_ENABLE                        (1 << 28)
-#define  PSR_VSC_BIT_7_SET             (1 << 27)
-#define  VSC_SELECT_MASK               (0x3 << 26)
-#define  VSC_SELECT_SHIFT              26
-#define  VSC_DIP_HW_HEA_DATA           (0 << 26)
-#define  VSC_DIP_HW_HEA_SW_DATA                (1 << 26)
-#define  VSC_DIP_HW_DATA_SW_HEA                (2 << 26)
-#define  VSC_DIP_SW_HEA_DATA           (3 << 26)
-#define  VDIP_ENABLE_PPS               (1 << 24)
-
 /* Panel power sequencing */
 #define PPS_BASE                       0x61200
 #define VLV_PPS_BASE                   (VLV_DISPLAY_BASE + PPS_BASE)
@@ -5636,9 +5640,9 @@ enum {
  */
 #define PIPE_EDP_OFFSET        0x7f000
 
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
-       dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-       dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET       0x7b000
+#define PIPE_DSI1_OFFSET       0x7b800
 
 #define PIPECONF(pipe)         _MMIO_PIPE2(pipe, _PIPEACONF)
 #define PIPEDSL(pipe)          _MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6087,10 +6091,6 @@ enum {
 #define _CURBBASE_IVB          0x71084
 #define _CURBPOS_IVB           0x71088
 
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-       dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-       dev_priv->info.display_mmio_offset)
-
 #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
 #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
 #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6224,6 +6224,10 @@ enum {
 #define _DSPBOFFSET            (dev_priv->info.display_mmio_offset + 0x711A4)
 #define _DSPBSURFLIVE          (dev_priv->info.display_mmio_offset + 0x711AC)
 
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF          0x7b008
+#define _PIPEDSI1CONF          0x7b808
+
 /* Sprite A control */
 #define _DVSACNTR              0x72180
 #define   DVS_ENABLE           (1 << 31)
@@ -6511,6 +6515,7 @@ enum {
 #define   PLANE_CTL_KEY_ENABLE_DESTINATION     (2 << 21)
 #define   PLANE_CTL_ORDER_BGRX                 (0 << 20)
 #define   PLANE_CTL_ORDER_RGBX                 (1 << 20)
+#define   PLANE_CTL_YUV420_Y_PLANE             (1 << 19)
 #define   PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709        (1 << 18)
 #define   PLANE_CTL_YUV422_ORDER_MASK          (0x3 << 16)
 #define   PLANE_CTL_YUV422_YUYV                        (0 << 16)
@@ -6554,17 +6559,33 @@ enum {
 #define _PLANE_KEYVAL_2_A                      0x70294
 #define _PLANE_KEYMSK_1_A                      0x70198
 #define _PLANE_KEYMSK_2_A                      0x70298
+#define  PLANE_KEYMSK_ALPHA_ENABLE             (1 << 31)
 #define _PLANE_KEYMAX_1_A                      0x701a0
 #define _PLANE_KEYMAX_2_A                      0x702a0
+#define  PLANE_KEYMAX_ALPHA(a)                 ((a) << 24)
 #define _PLANE_AUX_DIST_1_A                    0x701c0
 #define _PLANE_AUX_DIST_2_A                    0x702c0
 #define _PLANE_AUX_OFFSET_1_A                  0x701c4
 #define _PLANE_AUX_OFFSET_2_A                  0x702c4
+#define _PLANE_CUS_CTL_1_A                     0x701c8
+#define _PLANE_CUS_CTL_2_A                     0x702c8
+#define  PLANE_CUS_ENABLE                      (1 << 31)
+#define  PLANE_CUS_PLANE_6                     (0 << 30)
+#define  PLANE_CUS_PLANE_7                     (1 << 30)
+#define  PLANE_CUS_HPHASE_SIGN_NEGATIVE                (1 << 19)
+#define  PLANE_CUS_HPHASE_0                    (0 << 16)
+#define  PLANE_CUS_HPHASE_0_25                 (1 << 16)
+#define  PLANE_CUS_HPHASE_0_5                  (2 << 16)
+#define  PLANE_CUS_VPHASE_SIGN_NEGATIVE                (1 << 15)
+#define  PLANE_CUS_VPHASE_0                    (0 << 12)
+#define  PLANE_CUS_VPHASE_0_25                 (1 << 12)
+#define  PLANE_CUS_VPHASE_0_5                  (2 << 12)
 #define _PLANE_COLOR_CTL_1_A                   0x701CC /* GLK+ */
 #define _PLANE_COLOR_CTL_2_A                   0x702CC /* GLK+ */
 #define _PLANE_COLOR_CTL_3_A                   0x703CC /* GLK+ */
 #define   PLANE_COLOR_PIPE_GAMMA_ENABLE                (1 << 30) /* Pre-ICL */
 #define   PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE     (1 << 28)
#define   PLANE_COLOR_PIPE_CSC_ENABLE          (1 << 23) /* Pre-ICL */
+#define   PLANE_COLOR_INPUT_CSC_ENABLE         (1 << 20) /* ICL+ */
 #define   PLANE_COLOR_CSC_MODE_BYPASS                  (0 << 17)
 #define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709                (1 << 17)
@@ -6581,6 +6602,55 @@ enum {
 #define _PLANE_NV12_BUF_CFG_1_A                0x70278
 #define _PLANE_NV12_BUF_CFG_2_A                0x70378
 
+/* Input CSC Register Definitions */
+#define _PLANE_INPUT_CSC_RY_GY_1_A     0x701E0
+#define _PLANE_INPUT_CSC_RY_GY_2_A     0x702E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1_B     0x711E0
+#define _PLANE_INPUT_CSC_RY_GY_2_B     0x712E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
+       _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
+            _PLANE_INPUT_CSC_RY_GY_1_B)
+#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
+       _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+            _PLANE_INPUT_CSC_RY_GY_2_B)
+
+#define PLANE_INPUT_CSC_COEFF(pipe, plane, index)      \
+       _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) +  (index) * 4, \
+                   _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_A         0x701F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_A         0x702F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_B         0x711F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_B         0x712F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe)     \
+       _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
+            _PLANE_INPUT_CSC_PREOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe)     \
+       _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
+            _PLANE_INPUT_CSC_PREOFF_HI_2_B)
+#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index)     \
+       _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
+                   _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A                0x70204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A                0x70304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B                0x71204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B                0x71304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe)    \
+       _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
+            _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe)    \
+       _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
+            _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index)    \
+       _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
+                   _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
 
 #define _PLANE_CTL_1_B                         0x71180
 #define _PLANE_CTL_2_B                         0x71280
@@ -6697,6 +6767,15 @@ enum {
 #define PLANE_AUX_OFFSET(pipe, plane)   \
        _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
 
+#define _PLANE_CUS_CTL_1_B             0x711c8
+#define _PLANE_CUS_CTL_2_B             0x712c8
+#define _PLANE_CUS_CTL_1(pipe)       \
+               _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
+#define _PLANE_CUS_CTL_2(pipe)       \
+               _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
+#define PLANE_CUS_CTL(pipe, plane)   \
+       _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
+
 #define _PLANE_COLOR_CTL_1_B                   0x711CC
 #define _PLANE_COLOR_CTL_2_B                   0x712CC
 #define _PLANE_COLOR_CTL_3_B                   0x713CC
@@ -6850,11 +6929,12 @@ enum {
 #define _PS_2B_CTRL      0x68A80
 #define _PS_1C_CTRL      0x69180
 #define PS_SCALER_EN        (1 << 31)
-#define PS_SCALER_MODE_MASK (3 << 28)
-#define PS_SCALER_MODE_DYN  (0 << 28)
-#define PS_SCALER_MODE_HQ  (1 << 28)
+#define SKL_PS_SCALER_MODE_MASK (3 << 28)
+#define SKL_PS_SCALER_MODE_DYN  (0 << 28)
+#define SKL_PS_SCALER_MODE_HQ  (1 << 28)
 #define SKL_PS_SCALER_MODE_NV12 (2 << 28)
 #define PS_SCALER_MODE_PLANAR (1 << 29)
+#define PS_SCALER_MODE_NORMAL (0 << 29)
 #define PS_PLANE_SEL_MASK  (7 << 25)
 #define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
 #define PS_FILTER_MASK         (3 << 23)
@@ -6871,6 +6951,8 @@ enum {
 #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
 #define PS_VADAPT_MODE_MOD_ADAPT   (1 << 5)
 #define PS_VADAPT_MODE_MOST_ADAPT  (3 << 5)
+#define PS_PLANE_Y_SEL_MASK  (7 << 5)
+#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
 
 #define _PS_PWR_GATE_1A     0x68160
 #define _PS_PWR_GATE_2A     0x68260
@@ -7317,9 +7399,10 @@ enum {
 #define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
-#define CHICKEN_TRANS_A         0x420c0
-#define CHICKEN_TRANS_B         0x420c4
-#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define CHICKEN_TRANS_A                _MMIO(0x420c0)
+#define CHICKEN_TRANS_B                _MMIO(0x420c4)
+#define CHICKEN_TRANS_C                _MMIO(0x420c8)
+#define CHICKEN_TRANS_EDP      _MMIO(0x420cc)
 #define  VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
 #define  DDI_TRAINING_OVERRIDE_ENABLE  (1 << 19)
 #define  DDI_TRAINING_OVERRIDE_VALUE   (1 << 18)
@@ -7409,6 +7492,10 @@ enum {
 #define GEN9_SLICE_COMMON_ECO_CHICKEN1         _MMIO(0x731c)
 #define   GEN11_STATE_CACHE_REDIRECT_TO_CS     (1 << 11)
 
+#define GEN7_SARCHKMD                          _MMIO(0xB000)
+#define GEN7_DISABLE_DEMAND_PREFETCH           (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH           (1 << 30)
+
 #define GEN7_L3SQCREG1                         _MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
 
@@ -7824,8 +7911,7 @@ enum {
 #define  CNP_RAWCLK_DIV_MASK   (0x3ff << 16)
 #define  CNP_RAWCLK_DIV(div)   ((div) << 16)
 #define  CNP_RAWCLK_FRAC_MASK  (0xf << 26)
-#define  CNP_RAWCLK_FRAC(frac) ((frac) << 26)
-#define  ICP_RAWCLK_DEN(den)   ((den) << 26)
+#define  CNP_RAWCLK_DEN(den)   ((den) << 26)
 #define  ICP_RAWCLK_NUM(num)   ((num) << 11)
 
 #define PCH_DPLL_TMR_CFG        _MMIO(0xc6208)
@@ -8625,8 +8711,7 @@ enum {
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC     (1 << 9)
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC      (1 << 7)
 
-#define GAMW_ECO_DEV_RW_IA_REG                 _MMIO(0x4080)
-#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE      (1 << 7)
+#define GEN10_SAMPLER_MODE             _MMIO(0xE18C)
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)         _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8927,6 +9012,15 @@ enum skl_power_gate {
 #define   CNL_AUX_ANAOVRD1_ENABLE      (1 << 16)
 #define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 23)
 
+#define _ICL_AUX_REG_IDX(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define _ICL_AUX_ANAOVRD1_A            0x162398
+#define _ICL_AUX_ANAOVRD1_B            0x6C398
+#define ICL_AUX_ANAOVRD1(pw_idx)       _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
+                                                   _ICL_AUX_ANAOVRD1_A, \
+                                                   _ICL_AUX_ANAOVRD1_B))
+#define   ICL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 7)
+#define   ICL_AUX_ANAOVRD1_ENABLE      (1 << 0)
+
 /* HDCP Key Registers */
 #define HDCP_KEY_CONF                  _MMIO(0x66c00)
 #define  HDCP_AKSV_SEND_TRIGGER                BIT(31)
@@ -9009,11 +9103,45 @@ enum skl_power_gate {
 #define  HDCP_STATUS_CIPHER            BIT(16)
 #define  HDCP_STATUS_FRAME_CNT(x)      (((x) >> 8) & 0xff)
 
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE              0x66800
+#define _PORTB_HDCP2_BASE              0x66500
+#define _PORTC_HDCP2_BASE              0x66600
+#define _PORTD_HDCP2_BASE              0x66700
+#define _PORTE_HDCP2_BASE              0x66A00
+#define _PORTF_HDCP2_BASE              0x66900
+#define _PORT_HDCP2_BASE(port, x)      _MMIO(_PICK((port), \
+                                         _PORTA_HDCP2_BASE, \
+                                         _PORTB_HDCP2_BASE, \
+                                         _PORTC_HDCP2_BASE, \
+                                         _PORTD_HDCP2_BASE, \
+                                         _PORTE_HDCP2_BASE, \
+                                         _PORTF_HDCP2_BASE) + (x))
+
+#define HDCP2_AUTH_DDI(port)           _PORT_HDCP2_BASE(port, 0x98)
+#define   AUTH_LINK_AUTHENTICATED      BIT(31)
+#define   AUTH_LINK_TYPE               BIT(30)
+#define   AUTH_FORCE_CLR_INPUTCTR      BIT(19)
+#define   AUTH_CLR_KEYS                        BIT(18)
+
+#define HDCP2_CTL_DDI(port)            _PORT_HDCP2_BASE(port, 0xB0)
+#define   CTL_LINK_ENCRYPTION_REQ      BIT(31)
+
+#define HDCP2_STATUS_DDI(port)         _PORT_HDCP2_BASE(port, 0xB4)
+#define   STREAM_ENCRYPTION_STATUS_A   BIT(31)
+#define   STREAM_ENCRYPTION_STATUS_B   BIT(30)
+#define   STREAM_ENCRYPTION_STATUS_C   BIT(29)
+#define   LINK_TYPE_STATUS             BIT(22)
+#define   LINK_AUTH_STATUS             BIT(21)
+#define   LINK_ENCRYPTION_STATUS       BIT(20)
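
All HDCP2.2 per-DDI registers hang off a single per-port base plus a fixed offset. A worked example, assuming PORT_B is enum value 1:

    /* HDCP2_AUTH_DDI(PORT_B) -> _PICK(1, 0x66800, 0x66500, ...) + 0x98
     *                         = 0x66500 + 0x98 = 0x66598
     */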
+
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A          0x60400
 #define _TRANS_DDI_FUNC_CTL_B          0x61400
 #define _TRANS_DDI_FUNC_CTL_C          0x62400
 #define _TRANS_DDI_FUNC_CTL_EDP                0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0       0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1       0x6bc00
 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
 #define  TRANS_DDI_FUNC_ENABLE         (1 << 31)
@@ -9051,6 +9179,19 @@ enum skl_power_gate {
                                        | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
                                        | TRANS_DDI_HDMI_SCRAMBLING)
 
+#define _TRANS_DDI_FUNC_CTL2_A         0x60404
+#define _TRANS_DDI_FUNC_CTL2_B         0x61404
+#define _TRANS_DDI_FUNC_CTL2_C         0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP       0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0      0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1      0x6bc04
+#define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
+                                                    _TRANS_DDI_FUNC_CTL2_A)
+#define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
+
 /* DisplayPort Transport Control */
 #define _DP_TP_CTL_A                   0x64040
 #define _DP_TP_CTL_B                   0x64140
@@ -9222,6 +9363,8 @@ enum skl_power_gate {
 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
 #define  TRANS_MSA_SYNC_CLK            (1 << 0)
+#define  TRANS_MSA_SAMPLING_444                (2 << 1)
+#define  TRANS_MSA_CLRSP_YCBCR         (2 << 3)
 #define  TRANS_MSA_6_BPC               (0 << 5)
 #define  TRANS_MSA_8_BPC               (1 << 5)
 #define  TRANS_MSA_10_BPC              (2 << 5)
@@ -9789,6 +9932,10 @@ enum skl_power_gate {
 #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c)    /* ports A and C only */
 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
 
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1)      _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+                                                   dsi0, dsi1)
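
_MMIO_DSI() rebases a Gen11 DSI transcoder onto index 0/1 and hands it to _MMIO_TRANS(), whose _PICK_EVEN() interpolates linearly between the two register copies. A worked example against DSI_TRANS_FUNC_CONF (defined in a later hunk), with _PICK_EVEN's shape assumed as a + i * (b - a):

    /* DSI_TRANS_FUNC_CONF(TRANSCODER_DSI_1)
     *   -> _MMIO_TRANS(1, 0x6b030, 0x6b830)
     *   -> 0x6b030 + 1 * (0x6b830 - 0x6b030) = 0x6b830
     */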
+
 #define MIPIO_TXESC_CLK_DIV1                   _MMIO(0x160004)
 #define  GLK_TX_ESC_CLK_DIV1_MASK                      0x3FF
 #define MIPIO_TXESC_CLK_DIV2                   _MMIO(0x160008)
@@ -9952,6 +10099,39 @@ enum skl_power_gate {
                                                    _ICL_DSI_IO_MODECTL_1)
 #define  COMBO_PHY_MODE_DSI                            (1 << 0)
 
+/* Display Stream Splitter Control */
+#define DSS_CTL1                               _MMIO(0x67400)
+#define  SPLITTER_ENABLE                       (1 << 31)
+#define  JOINER_ENABLE                         (1 << 30)
+#define  DUAL_LINK_MODE_INTERLEAVE             (1 << 24)
+#define  DUAL_LINK_MODE_FRONTBACK              (0 << 24)
+#define  OVERLAP_PIXELS_MASK                   (0xf << 16)
+#define  OVERLAP_PIXELS(pixels)                        ((pixels) << 16)
+#define  LEFT_DL_BUF_TARGET_DEPTH_MASK         (0xfff << 0)
+#define  LEFT_DL_BUF_TARGET_DEPTH(pixels)      ((pixels) << 0)
+#define  MAX_DL_BUFFER_TARGET_DEPTH            0x5a0
+
+#define DSS_CTL2                               _MMIO(0x67404)
+#define  LEFT_BRANCH_VDSC_ENABLE               (1 << 31)
+#define  RIGHT_BRANCH_VDSC_ENABLE              (1 << 15)
+#define  RIGHT_DL_BUF_TARGET_DEPTH_MASK                (0xfff << 0)
+#define  RIGHT_DL_BUF_TARGET_DEPTH(pixels)     ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB                  0x78200
+#define _ICL_PIPE_DSS_CTL1_PC                  0x78400
+#define ICL_PIPE_DSS_CTL1(pipe)                        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_PIPE_DSS_CTL1_PB, \
+                                                          _ICL_PIPE_DSS_CTL1_PC)
+#define  BIG_JOINER_ENABLE                     (1 << 29)
+#define  MASTER_BIG_JOINER_ENABLE              (1 << 28)
+#define  VGA_CENTERING_ENABLE                  (1 << 27)
+
+#define _ICL_PIPE_DSS_CTL2_PB                  0x78204
+#define _ICL_PIPE_DSS_CTL2_PC                  0x78404
+#define ICL_PIPE_DSS_CTL2(pipe)                        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_PIPE_DSS_CTL2_PB, \
+                                                          _ICL_PIPE_DSS_CTL2_PC)
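
Per-pipe DSS control exists only for pipes B and C, hence the (pipe) - PIPE_B rebase before the two-entry selection. A worked example, again assuming _PICK_EVEN's a + i * (b - a) shape:

    /* ICL_PIPE_DSS_CTL1(PIPE_C) -> _MMIO_PIPE(1, 0x78200, 0x78400)
     *   -> 0x78200 + 1 * (0x78400 - 0x78200) = 0x78400
     */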
+
 #define BXT_P_DSI_REGULATOR_CFG                        _MMIO(0x160020)
 #define  STAP_SELECT                                   (1 << 0)
 
@@ -10288,6 +10468,235 @@ enum skl_power_gate {
                                                   _ICL_DSI_T_INIT_MASTER_0,\
                                                   _ICL_DSI_T_INIT_MASTER_1)
 
+#define _DPHY_CLK_TIMING_PARAM_0       0x162180
+#define _DPHY_CLK_TIMING_PARAM_1       0x6c180
+#define DPHY_CLK_TIMING_PARAM(port)    _MMIO_PORT(port,        \
+                                                  _DPHY_CLK_TIMING_PARAM_0,\
+                                                  _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0                0x6b080
+#define _DSI_CLK_TIMING_PARAM_1                0x6b880
+#define DSI_CLK_TIMING_PARAM(port)     _MMIO_PORT(port,        \
+                                                  _DSI_CLK_TIMING_PARAM_0,\
+                                                  _DSI_CLK_TIMING_PARAM_1)
+#define  CLK_PREPARE_OVERRIDE          (1 << 31)
+#define  CLK_PREPARE(x)                ((x) << 28)
+#define  CLK_PREPARE_MASK              (0x7 << 28)
+#define  CLK_PREPARE_SHIFT             28
+#define  CLK_ZERO_OVERRIDE             (1 << 27)
+#define  CLK_ZERO(x)                   ((x) << 20)
+#define  CLK_ZERO_MASK                 (0xf << 20)
+#define  CLK_ZERO_SHIFT                20
+#define  CLK_PRE_OVERRIDE              (1 << 19)
+#define  CLK_PRE(x)                    ((x) << 16)
+#define  CLK_PRE_MASK                  (0x3 << 16)
+#define  CLK_PRE_SHIFT                 16
+#define  CLK_POST_OVERRIDE             (1 << 15)
+#define  CLK_POST(x)                   ((x) << 8)
+#define  CLK_POST_MASK                 (0x7 << 8)
+#define  CLK_POST_SHIFT                8
+#define  CLK_TRAIL_OVERRIDE            (1 << 7)
+#define  CLK_TRAIL(x)                  ((x) << 0)
+#define  CLK_TRAIL_MASK                (0xf << 0)
+#define  CLK_TRAIL_SHIFT               0
+
+#define _DPHY_DATA_TIMING_PARAM_0      0x162184
+#define _DPHY_DATA_TIMING_PARAM_1      0x6c184
+#define DPHY_DATA_TIMING_PARAM(port)   _MMIO_PORT(port,        \
+                                                  _DPHY_DATA_TIMING_PARAM_0,\
+                                                  _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0       0x6B084
+#define _DSI_DATA_TIMING_PARAM_1       0x6B884
+#define DSI_DATA_TIMING_PARAM(port)    _MMIO_PORT(port,        \
+                                                  _DSI_DATA_TIMING_PARAM_0,\
+                                                  _DSI_DATA_TIMING_PARAM_1)
+#define  HS_PREPARE_OVERRIDE           (1 << 31)
+#define  HS_PREPARE(x)                 ((x) << 24)
+#define  HS_PREPARE_MASK               (0x7 << 24)
+#define  HS_PREPARE_SHIFT              24
+#define  HS_ZERO_OVERRIDE              (1 << 23)
+#define  HS_ZERO(x)                    ((x) << 16)
+#define  HS_ZERO_MASK                  (0xf << 16)
+#define  HS_ZERO_SHIFT                 16
+#define  HS_TRAIL_OVERRIDE             (1 << 15)
+#define  HS_TRAIL(x)                   ((x) << 8)
+#define  HS_TRAIL_MASK                 (0x7 << 8)
+#define  HS_TRAIL_SHIFT                8
+#define  HS_EXIT_OVERRIDE              (1 << 7)
+#define  HS_EXIT(x)                    ((x) << 0)
+#define  HS_EXIT_MASK                  (0x7 << 0)
+#define  HS_EXIT_SHIFT                 0
+
+#define _DPHY_TA_TIMING_PARAM_0                0x162188
+#define _DPHY_TA_TIMING_PARAM_1                0x6c188
+#define DPHY_TA_TIMING_PARAM(port)     _MMIO_PORT(port,        \
+                                                  _DPHY_TA_TIMING_PARAM_0,\
+                                                  _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0         0x6b098
+#define _DSI_TA_TIMING_PARAM_1         0x6b898
+#define DSI_TA_TIMING_PARAM(port)      _MMIO_PORT(port,        \
+                                                  _DSI_TA_TIMING_PARAM_0,\
+                                                  _DSI_TA_TIMING_PARAM_1)
+#define  TA_SURE_OVERRIDE              (1 << 31)
+#define  TA_SURE(x)                    ((x) << 16)
+#define  TA_SURE_MASK                  (0x1f << 16)
+#define  TA_SURE_SHIFT                 16
+#define  TA_GO_OVERRIDE                (1 << 15)
+#define  TA_GO(x)                      ((x) << 8)
+#define  TA_GO_MASK                    (0xf << 8)
+#define  TA_GO_SHIFT                   8
+#define  TA_GET_OVERRIDE               (1 << 7)
+#define  TA_GET(x)                     ((x) << 0)
+#define  TA_GET_MASK                   (0xf << 0)
+#define  TA_GET_SHIFT                  0
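These timing-parameter fields all follow the same read-modify-write pattern: clear the field with its _MASK, then OR in the _OVERRIDE bit and the new value via the value macro. A minimal sketch, assuming the I915_READ/I915_WRITE accessors used elsewhere in this patch (the helper name is illustrative only; the same sequence appears in gen11_dsi_setup_dphy_timings() below):

	static void example_override_ta_sure(struct drm_i915_private *dev_priv,
					     enum port port)
	{
		u32 tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));

		tmp &= ~TA_SURE_MASK;                 /* clear the field */
		tmp |= TA_SURE_OVERRIDE | TA_SURE(0); /* force override, value 0 */
		I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
	}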
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0         0x6b030
+#define _DSI_TRANS_FUNC_CONF_1         0x6b830
+#define DSI_TRANS_FUNC_CONF(tc)                _MMIO_DSI(tc,   \
+                                                 _DSI_TRANS_FUNC_CONF_0,\
+                                                 _DSI_TRANS_FUNC_CONF_1)
+#define  OP_MODE_MASK                  (0x3 << 28)
+#define  OP_MODE_SHIFT                 28
+#define  CMD_MODE_NO_GATE              (0x0 << 28)
+#define  CMD_MODE_TE_GATE              (0x1 << 28)
+#define  VIDEO_MODE_SYNC_EVENT         (0x2 << 28)
+#define  VIDEO_MODE_SYNC_PULSE         (0x3 << 28)
+#define  LINK_READY                    (1 << 20)
+#define  PIX_FMT_MASK                  (0x3 << 16)
+#define  PIX_FMT_SHIFT                 16
+#define  PIX_FMT_RGB565                        (0x0 << 16)
+#define  PIX_FMT_RGB666_PACKED         (0x1 << 16)
+#define  PIX_FMT_RGB666_LOOSE          (0x2 << 16)
+#define  PIX_FMT_RGB888                        (0x3 << 16)
+#define  PIX_FMT_RGB101010             (0x4 << 16)
+#define  PIX_FMT_RGB121212             (0x5 << 16)
+#define  PIX_FMT_COMPRESSED            (0x6 << 16)
+#define  BGR_TRANSMISSION              (1 << 15)
+#define  PIX_VIRT_CHAN(x)              ((x) << 12)
+#define  PIX_VIRT_CHAN_MASK            (0x3 << 12)
+#define  PIX_VIRT_CHAN_SHIFT           12
+#define  PIX_BUF_THRESHOLD_MASK                (0x3 << 10)
+#define  PIX_BUF_THRESHOLD_SHIFT       10
+#define  PIX_BUF_THRESHOLD_1_4         (0x0 << 10)
+#define  PIX_BUF_THRESHOLD_1_2         (0x1 << 10)
+#define  PIX_BUF_THRESHOLD_3_4         (0x2 << 10)
+#define  PIX_BUF_THRESHOLD_FULL                (0x3 << 10)
+#define  CONTINUOUS_CLK_MASK           (0x3 << 8)
+#define  CONTINUOUS_CLK_SHIFT          8
+#define  CLK_ENTER_LP_AFTER_DATA       (0x0 << 8)
+#define  CLK_HS_OR_LP                  (0x2 << 8)
+#define  CLK_HS_CONTINUOUS             (0x3 << 8)
+#define  LINK_CALIBRATION_MASK         (0x3 << 4)
+#define  LINK_CALIBRATION_SHIFT                4
+#define  CALIBRATION_DISABLED          (0x0 << 4)
+#define  CALIBRATION_ENABLED_INITIAL_ONLY      (0x2 << 4)
+#define  CALIBRATION_ENABLED_INITIAL_PERIODIC  (0x3 << 4)
+#define  S3D_ORIENTATION_LANDSCAPE     (1 << 1)
+#define  EOTP_DISABLED                 (1 << 0)
+
+#define _DSI_CMD_RXCTL_0               0x6b0d4
+#define _DSI_CMD_RXCTL_1               0x6b8d4
+#define DSI_CMD_RXCTL(tc)              _MMIO_DSI(tc,   \
+                                                 _DSI_CMD_RXCTL_0,\
+                                                 _DSI_CMD_RXCTL_1)
+#define  READ_UNLOADS_DW               (1 << 16)
+#define  RECEIVED_UNASSIGNED_TRIGGER   (1 << 15)
+#define  RECEIVED_ACKNOWLEDGE_TRIGGER  (1 << 14)
+#define  RECEIVED_TEAR_EFFECT_TRIGGER  (1 << 13)
+#define  RECEIVED_RESET_TRIGGER                (1 << 12)
+#define  RECEIVED_PAYLOAD_WAS_LOST     (1 << 11)
+#define  RECEIVED_CRC_WAS_LOST         (1 << 10)
+#define  NUMBER_RX_PLOAD_DW_MASK       (0xff << 0)
+#define  NUMBER_RX_PLOAD_DW_SHIFT      0
+
+#define _DSI_CMD_TXCTL_0               0x6b0d0
+#define _DSI_CMD_TXCTL_1               0x6b8d0
+#define DSI_CMD_TXCTL(tc)              _MMIO_DSI(tc,   \
+                                                 _DSI_CMD_TXCTL_0,\
+                                                 _DSI_CMD_TXCTL_1)
+#define  KEEP_LINK_IN_HS               (1 << 24)
+#define  FREE_HEADER_CREDIT_MASK       (0x1f << 8)
+#define  FREE_HEADER_CREDIT_SHIFT      8
+#define  FREE_PLOAD_CREDIT_MASK                (0xff << 0)
+#define  FREE_PLOAD_CREDIT_SHIFT       0
+#define  MAX_HEADER_CREDIT             0x10
+#define  MAX_PLOAD_CREDIT              0x40
+
+#define _DSI_CMD_TXHDR_0               0x6b100
+#define _DSI_CMD_TXHDR_1               0x6b900
+#define DSI_CMD_TXHDR(tc)              _MMIO_DSI(tc,   \
+                                                 _DSI_CMD_TXHDR_0,\
+                                                 _DSI_CMD_TXHDR_1)
+#define  PAYLOAD_PRESENT               (1 << 31)
+#define  LP_DATA_TRANSFER              (1 << 30)
+#define  VBLANK_FENCE                  (1 << 29)
+#define  PARAM_WC_MASK                 (0xffff << 8)
+#define  PARAM_WC_LOWER_SHIFT          8
+#define  PARAM_WC_UPPER_SHIFT          16
+#define  VC_MASK                       (0x3 << 6)
+#define  VC_SHIFT                      6
+#define  DT_MASK                       (0x3f << 0)
+#define  DT_SHIFT                      0
+
+#define _DSI_CMD_TXPYLD_0              0x6b104
+#define _DSI_CMD_TXPYLD_1              0x6b904
+#define DSI_CMD_TXPYLD(tc)             _MMIO_DSI(tc,   \
+                                                 _DSI_CMD_TXPYLD_0,\
+                                                 _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0                  0x6b0d8
+#define _DSI_LP_MSG_1                  0x6b8d8
+#define DSI_LP_MSG(tc)                 _MMIO_DSI(tc,   \
+                                                 _DSI_LP_MSG_0,\
+                                                 _DSI_LP_MSG_1)
+#define  LPTX_IN_PROGRESS              (1 << 17)
+#define  LINK_IN_ULPS                  (1 << 16)
+#define  LINK_ULPS_TYPE_LP11           (1 << 8)
+#define  LINK_ENTER_ULPS               (1 << 0)
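A hedged sketch of how the ULPS bits above would plausibly be driven; the exact entry sequence is an assumption here, not taken from this patch:

	/* Hypothetical: request ULPS entry, then poll the status bit. */
	tmp = I915_READ(DSI_LP_MSG(dsi_trans));
	tmp |= LINK_ENTER_ULPS;
	I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
	if (wait_for_us(I915_READ(DSI_LP_MSG(dsi_trans)) & LINK_IN_ULPS, 100))
		DRM_ERROR("link failed to enter ULPS\n");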
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0                 0x6b044
+#define _DSI_HSTX_TO_1                 0x6b844
+#define DSI_HSTX_TO(tc)                        _MMIO_DSI(tc,   \
+                                                 _DSI_HSTX_TO_0,\
+                                                 _DSI_HSTX_TO_1)
+#define  HSTX_TIMEOUT_VALUE_MASK       (0xffff << 16)
+#define  HSTX_TIMEOUT_VALUE_SHIFT      16
+#define  HSTX_TIMEOUT_VALUE(x)         ((x) << 16)
+#define  HSTX_TIMED_OUT                        (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0            0x6b048
+#define _DSI_LPRX_HOST_TO_1            0x6b848
+#define DSI_LPRX_HOST_TO(tc)           _MMIO_DSI(tc,   \
+                                                 _DSI_LPRX_HOST_TO_0,\
+                                                 _DSI_LPRX_HOST_TO_1)
+#define  LPRX_TIMED_OUT                        (1 << 16)
+#define  LPRX_TIMEOUT_VALUE_MASK       (0xffff << 0)
+#define  LPRX_TIMEOUT_VALUE_SHIFT      0
+#define  LPRX_TIMEOUT_VALUE(x)         ((x) << 0)
+
+#define _DSI_PWAIT_TO_0                        0x6b040
+#define _DSI_PWAIT_TO_1                        0x6b840
+#define DSI_PWAIT_TO(tc)               _MMIO_DSI(tc,   \
+                                                 _DSI_PWAIT_TO_0,\
+                                                 _DSI_PWAIT_TO_1)
+#define  PRESET_TIMEOUT_VALUE_MASK     (0xffff << 16)
+#define  PRESET_TIMEOUT_VALUE_SHIFT    16
+#define  PRESET_TIMEOUT_VALUE(x)       ((x) << 16)
+#define  PRESPONSE_TIMEOUT_VALUE_MASK  (0xffff << 0)
+#define  PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define  PRESPONSE_TIMEOUT_VALUE(x)    ((x) << 0)
+
+#define _DSI_TA_TO_0                   0x6b04c
+#define _DSI_TA_TO_1                   0x6b84c
+#define DSI_TA_TO(tc)                  _MMIO_DSI(tc,   \
+                                                 _DSI_TA_TO_0,\
+                                                 _DSI_TA_TO_1)
+#define  TA_TIMED_OUT                  (1 << 16)
+#define  TA_TIMEOUT_VALUE_MASK         (0xffff << 0)
+#define  TA_TIMEOUT_VALUE_SHIFT                0
+#define  TA_TIMEOUT_VALUE(x)           ((x) << 0)
+
 /* bits 31:0 */
 #define _MIPIA_DBI_BW_CTRL             (dev_priv->mipi_mmio_base + 0xb084)
 #define _MIPIC_DBI_BW_CTRL             (dev_priv->mipi_mmio_base + 0xb884)
@@ -10400,10 +10809,6 @@ enum skl_power_gate {
 #define MIPI_READ_DATA_VALID(port)     _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
 #define  READ_DATA_VALID(n)                            (1 << (n))
 
-/* For UMS only (deprecated): */
-#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-
 /* MOCS (Memory Object Control State) registers */
 #define GEN9_LNCFCMOCS(i)      _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
 
@@ -10689,6 +11094,7 @@ enum skl_power_gate {
 #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define  DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame)  ((slice_row_per_frame) << 20)
 #define  DSC_SLICE_PER_LINE(slice_per_line)            ((slice_per_line) << 16)
 #define  DSC_SLICE_CHUNK_SIZE(slice_chunk_size)                ((slice_chunk_size) << 0)
 
@@ -10743,17 +11149,17 @@ enum skl_power_gate {
                                                _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
                                                _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
 
-#define PORT_TX_DFLEXDPSP                      _MMIO(0x1638A0)
+#define PORT_TX_DFLEXDPSP                      _MMIO(FIA1_BASE + 0x008A0)
 #define   TC_LIVE_STATE_TBT(tc_port)           (1 << ((tc_port) * 8 + 6))
 #define   TC_LIVE_STATE_TC(tc_port)            (1 << ((tc_port) * 8 + 5))
 #define   DP_LANE_ASSIGNMENT_SHIFT(tc_port)    ((tc_port) * 8)
 #define   DP_LANE_ASSIGNMENT_MASK(tc_port)     (0xf << ((tc_port) * 8))
 #define   DP_LANE_ASSIGNMENT(tc_port, x)       ((x) << ((tc_port) * 8))
 
-#define PORT_TX_DFLEXDPPMS                             _MMIO(0x163890)
+#define PORT_TX_DFLEXDPPMS                             _MMIO(FIA1_BASE + 0x00890)
 #define   DP_PHY_MODE_STATUS_COMPLETED(tc_port)                (1 << (tc_port))
 
-#define PORT_TX_DFLEXDPCSSS                            _MMIO(0x163894)
+#define PORT_TX_DFLEXDPCSSS                    _MMIO(FIA1_BASE + 0x00894)
 #define   DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)         (1 << (tc_port))
 
 #endif /* _I915_REG_H_ */
index a492385b2089252d1a1b18daf5af68a9587b5760..71107540581dc783af2aac8bce8a1e78763ad00d 100644 (file)
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
-       return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
-                    struct i915_dependency *dep)
-{
-       kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
-                                struct i915_sched_node *signal,
-                                struct i915_dependency *dep,
-                                unsigned long flags)
-{
-       INIT_LIST_HEAD(&dep->dfs_link);
-       list_add(&dep->wait_link, &signal->waiters_list);
-       list_add(&dep->signal_link, &node->signalers_list);
-       dep->signaler = signal;
-       dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
-                              struct i915_sched_node *node,
-                              struct i915_sched_node *signal)
-{
-       struct i915_dependency *dep;
-
-       dep = i915_dependency_alloc(i915);
-       if (!dep)
-               return -ENOMEM;
-
-       __i915_sched_node_add_dependency(node, signal, dep,
-                                        I915_DEPENDENCY_ALLOC);
-       return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
-                    struct i915_sched_node *node)
-{
-       struct i915_dependency *dep, *tmp;
-
-       GEM_BUG_ON(!list_empty(&node->link));
-
-       /*
-        * Everyone we depended upon (the fences we wait to be signaled)
-        * should retire before us and remove themselves from our list.
-        * However, retirement is run independently on each timeline and
-        * so we may be called out-of-order.
-        */
-       list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-               GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
-               GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-               list_del(&dep->wait_link);
-               if (dep->flags & I915_DEPENDENCY_ALLOC)
-                       i915_dependency_free(i915, dep);
-       }
-
-       /* Remove ourselves from everyone who depends upon us */
-       list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-               GEM_BUG_ON(dep->signaler != node);
-               GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-               list_del(&dep->signal_link);
-               if (dep->flags & I915_DEPENDENCY_ALLOC)
-                       i915_dependency_free(i915, dep);
-       }
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
-       INIT_LIST_HEAD(&node->signalers_list);
-       INIT_LIST_HEAD(&node->waiters_list);
-       INIT_LIST_HEAD(&node->link);
-       node->attr.priority = I915_PRIORITY_INVALID;
-}
-
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
        struct intel_engine_cs *engine;
@@ -221,6 +136,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
                          intel_engine_get_seqno(engine),
                          seqno);
 
+               kthread_park(engine->breadcrumbs.signaler);
+
                if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
                        /* Flush any waiters before we reuse the seqno */
                        intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +152,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
                /* Finally reset hw state */
                intel_engine_init_global_seqno(engine, seqno);
                engine->timeline.seqno = seqno;
+
+               kthread_unpark(engine->breadcrumbs.signaler);
        }
 
        list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +659,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
                if (rq)
                        cond_synchronize_rcu(rq->rcustate);
 
-               /*
-                * We've forced the client to stall and catch up with whatever
-                * backlog there might have been. As we are assuming that we
-                * caused the mempressure, now is an opportune time to
-                * recover as much memory from the request pool as is possible.
-                * Having already penalized the client to stall, we spend
-                * a little extra time to re-optimise page allocation.
-                */
-               kmem_cache_shrink(i915->requests);
-               rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
-
                rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
                if (!rq) {
                        ret = -ENOMEM;
@@ -1127,8 +1035,20 @@ void i915_request_add(struct i915_request *request)
         */
        local_bh_disable();
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
-       if (engine->schedule)
-               engine->schedule(request, &request->gem_context->sched);
+       if (engine->schedule) {
+               struct i915_sched_attr attr = request->gem_context->sched;
+
+               /*
+                * Boost the priority of new clients (new request flows).
+                *
+                * Allow interactive/synchronous clients to jump ahead of
+                * the bulk clients. (FQ_CODEL)
+                */
+               if (!prev || i915_request_completed(prev))
+                       attr.priority |= I915_PRIORITY_NEWCLIENT;
+
+               engine->schedule(request, &attr);
+       }
        rcu_read_unlock();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1310,6 +1230,8 @@ long i915_request_wait(struct i915_request *rq,
                add_wait_queue(errq, &reset);
 
        intel_wait_init(&wait);
+       if (flags & I915_WAIT_PRIORITY)
+               i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
 
 restart:
        do {
index 7fa94b0249683b476142fc1213f86e0d6abac013..90e9d170a0cd5e00645842e3955df058e4a8b338 100644 (file)
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
        __attribute__((nonnull(1)));
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
 #define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
+#define I915_WAIT_PRIORITY     BIT(2) /* small priority bump for the request */
+#define I915_WAIT_ALL          BIT(3) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
 
 static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
                                            u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
        return __i915_request_completed(rq, seqno);
 }
 
-static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
-{
-       const struct i915_request *rq =
-               container_of(node, const struct i915_request, sched);
-
-       return i915_request_completed(rq);
-}
-
 void i915_retire_requests(struct drm_i915_private *i915);
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644 (file)
index 0000000..340faea
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/mutex.h>
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+static DEFINE_SPINLOCK(schedule_lock);
+
+static const struct i915_request *
+node_to_request(const struct i915_sched_node *node)
+{
+       return container_of(node, const struct i915_request, sched);
+}
+
+static inline bool node_signaled(const struct i915_sched_node *node)
+{
+       return i915_request_completed(node_to_request(node));
+}
+
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+       INIT_LIST_HEAD(&node->signalers_list);
+       INIT_LIST_HEAD(&node->waiters_list);
+       INIT_LIST_HEAD(&node->link);
+       node->attr.priority = I915_PRIORITY_INVALID;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+       return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+                    struct i915_dependency *dep)
+{
+       kmem_cache_free(i915->dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+                                     struct i915_sched_node *signal,
+                                     struct i915_dependency *dep,
+                                     unsigned long flags)
+{
+       bool ret = false;
+
+       spin_lock(&schedule_lock);
+
+       if (!node_signaled(signal)) {
+               INIT_LIST_HEAD(&dep->dfs_link);
+               list_add(&dep->wait_link, &signal->waiters_list);
+               list_add(&dep->signal_link, &node->signalers_list);
+               dep->signaler = signal;
+               dep->flags = flags;
+
+               ret = true;
+       }
+
+       spin_unlock(&schedule_lock);
+
+       return ret;
+}
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+                                  struct i915_sched_node *node,
+                                  struct i915_sched_node *signal)
+{
+       struct i915_dependency *dep;
+
+       dep = i915_dependency_alloc(i915);
+       if (!dep)
+               return -ENOMEM;
+
+       if (!__i915_sched_node_add_dependency(node, signal, dep,
+                                             I915_DEPENDENCY_ALLOC))
+               i915_dependency_free(i915, dep);
+
+       return 0;
+}
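A sketch of the expected caller pattern (variable names illustrative): one request's scheduling node is made to wait on another's, and -ENOMEM is propagated to the caller.

	err = i915_sched_node_add_dependency(rq->i915, &rq->sched,
					     &prereq->sched);
	if (err)
		return err;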
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+                         struct i915_sched_node *node)
+{
+       struct i915_dependency *dep, *tmp;
+
+       GEM_BUG_ON(!list_empty(&node->link));
+
+       spin_lock(&schedule_lock);
+
+       /*
+        * Everyone we depended upon (the fences we wait to be signaled)
+        * should retire before us and remove themselves from our list.
+        * However, retirement is run independently on each timeline and
+        * so we may be called out-of-order.
+        */
+       list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+               GEM_BUG_ON(!node_signaled(dep->signaler));
+               GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+               list_del(&dep->wait_link);
+               if (dep->flags & I915_DEPENDENCY_ALLOC)
+                       i915_dependency_free(i915, dep);
+       }
+
+       /* Remove ourselves from everyone who depends upon us */
+       list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+               GEM_BUG_ON(dep->signaler != node);
+               GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+               list_del(&dep->signal_link);
+               if (dep->flags & I915_DEPENDENCY_ALLOC)
+                       i915_dependency_free(i915, dep);
+       }
+
+       spin_unlock(&schedule_lock);
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+       return rb_entry(rb, struct i915_priolist, node);
+}
+
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+                            long queue_priority)
+{
+       struct rb_node *rb;
+       long last_prio, i;
+
+       if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               return;
+
+       GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+                  rb_first(&execlists->queue.rb_root));
+
+       last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+               const struct i915_priolist *p = to_priolist(rb);
+
+               GEM_BUG_ON(p->priority >= last_prio);
+               last_prio = p->priority;
+
+               GEM_BUG_ON(!p->used);
+               for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+                       if (list_empty(&p->requests[i]))
+                               continue;
+
+                       GEM_BUG_ON(!(p->used & BIT(i)));
+               }
+       }
+}
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_priolist *p;
+       struct rb_node **parent, *rb;
+       bool first = true;
+       int idx, i;
+
+       lockdep_assert_held(&engine->timeline.lock);
+       assert_priolists(execlists, INT_MAX);
+
+       /* buckets sorted from highest [in slot 0] to lowest priority */
+       idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+       prio >>= I915_USER_PRIORITY_SHIFT;
+       if (unlikely(execlists->no_priolist))
+               prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+       /* most positive priority is scheduled first, equal priorities fifo */
+       rb = NULL;
+       parent = &execlists->queue.rb_root.rb_node;
+       while (*parent) {
+               rb = *parent;
+               p = to_priolist(rb);
+               if (prio > p->priority) {
+                       parent = &rb->rb_left;
+               } else if (prio < p->priority) {
+                       parent = &rb->rb_right;
+                       first = false;
+               } else {
+                       goto out;
+               }
+       }
+
+       if (prio == I915_PRIORITY_NORMAL) {
+               p = &execlists->default_priolist;
+       } else {
+               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+               /* Convert an allocation failure to a priority bump */
+               if (unlikely(!p)) {
+                       prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+                       /* To maintain ordering with all rendering, after an
+                        * allocation failure we have to disable all scheduling.
+                        * Requests will then be executed in fifo, and schedule
+                        * will ensure that dependencies are emitted in fifo.
+                        * There will still be some reordering with existing
+                        * requests, so if userspace lied about their
+                        * dependencies that reordering may be visible.
+                        */
+                       execlists->no_priolist = true;
+                       goto find_priolist;
+               }
+       }
+
+       p->priority = prio;
+       for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+               INIT_LIST_HEAD(&p->requests[i]);
+       rb_link_node(&p->node, rb, parent);
+       rb_insert_color_cached(&p->node, &execlists->queue, first);
+       p->used = 0;
+
+out:
+       p->used |= BIT(idx);
+       return &p->requests[idx];
+}
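To make the bucket indexing concrete, a worked example using the I915_USER_PRIORITY_SHIFT == 2 encoding added to i915_scheduler.h later in this patch; the low bits select a slot within a priolist, with the highest internal priority landing in slot 0:

	int prio = I915_USER_PRIORITY(1) |
		   I915_PRIORITY_NEWCLIENT | I915_PRIORITY_WAIT;          /* 0b111 */
	int idx  = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1; /* == 0 */
	int user = prio >> I915_USER_PRIORITY_SHIFT;                      /* == 1 */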
+
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+       struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+       GEM_BUG_ON(!locked);
+
+       if (engine != locked) {
+               spin_unlock(&locked->timeline.lock);
+               spin_lock(&engine->timeline.lock);
+       }
+
+       return engine;
+}
+
+static void __i915_schedule(struct i915_request *rq,
+                           const struct i915_sched_attr *attr)
+{
+       struct list_head *uninitialized_var(pl);
+       struct intel_engine_cs *engine, *last;
+       struct i915_dependency *dep, *p;
+       struct i915_dependency stack;
+       const int prio = attr->priority;
+       LIST_HEAD(dfs);
+
+       /* Needed in order to use the temporary link inside i915_dependency */
+       lockdep_assert_held(&schedule_lock);
+       GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+       if (i915_request_completed(rq))
+               return;
+
+       if (prio <= READ_ONCE(rq->sched.attr.priority))
+               return;
+
+       stack.signaler = &rq->sched;
+       list_add(&stack.dfs_link, &dfs);
+
+       /*
+        * Recursively bump all dependent priorities to match the new request.
+        *
+        * A naive approach would be to use recursion:
+        * static void update_priorities(struct i915_sched_node *node, prio) {
+        *      list_for_each_entry(dep, &node->signalers_list, signal_link)
+        *              update_priorities(dep->signal, prio)
+        *      queue_request(node);
+        * }
+        * but that may have unlimited recursion depth and so runs a very
+        * real risk of overrunning the kernel stack. Instead, we build
+        * a flat list of all dependencies starting with the current request.
+        * As we walk the list of dependencies, we add all of its dependencies
+        * to the end of the list (this may include an already visited
+        * request) and continue to walk onwards onto the new dependencies. The
+        * end result is a topological list of requests in reverse order, the
+        * last element in the list is the request we must execute first.
+        */
+       list_for_each_entry(dep, &dfs, dfs_link) {
+               struct i915_sched_node *node = dep->signaler;
+
+               /*
+                * Within an engine, there can be no cycle, but we may
+                * refer to the same dependency chain multiple times
+                * (redundant dependencies are not eliminated) and across
+                * engines.
+                */
+               list_for_each_entry(p, &node->signalers_list, signal_link) {
+                       GEM_BUG_ON(p == dep); /* no cycles! */
+
+                       if (node_signaled(p->signaler))
+                               continue;
+
+                       GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+                       if (prio > READ_ONCE(p->signaler->attr.priority))
+                               list_move_tail(&p->dfs_link, &dfs);
+               }
+       }
+
+       /*
+        * If we didn't need to bump any existing priorities, and we haven't
+        * yet submitted this request (i.e. there is no potential race with
+        * execlists_submit_request()), we can set our own priority and skip
+        * acquiring the engine locks.
+        */
+       if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
+               GEM_BUG_ON(!list_empty(&rq->sched.link));
+               rq->sched.attr = *attr;
+
+               if (stack.dfs_link.next == stack.dfs_link.prev)
+                       return;
+
+               __list_del_entry(&stack.dfs_link);
+       }
+
+       last = NULL;
+       engine = rq->engine;
+       spin_lock_irq(&engine->timeline.lock);
+
+       /* Fifo and depth-first replacement ensure our deps execute before us */
+       list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+               struct i915_sched_node *node = dep->signaler;
+
+               INIT_LIST_HEAD(&dep->dfs_link);
+
+               engine = sched_lock_engine(node, engine);
+
+               /* Recheck after acquiring the engine->timeline.lock */
+               if (prio <= node->attr.priority || node_signaled(node))
+                       continue;
+
+               node->attr.priority = prio;
+               if (!list_empty(&node->link)) {
+                       if (last != engine) {
+                               pl = i915_sched_lookup_priolist(engine, prio);
+                               last = engine;
+                       }
+                       list_move_tail(&node->link, pl);
+               } else {
+                       /*
+                        * If the request is not in the priolist queue because
+                        * it is not yet runnable, then it doesn't contribute
+                        * to our preemption decisions. On the other hand,
+                        * if the request is on the HW, it too is not in the
+                        * queue; but in that case we may still need to reorder
+                        * the inflight requests.
+                        */
+                       if (!i915_sw_fence_done(&node_to_request(node)->submit))
+                               continue;
+               }
+
+               if (prio <= engine->execlists.queue_priority)
+                       continue;
+
+               /*
+                * If we are already the currently executing context, don't
+                * bother evaluating if we should preempt ourselves.
+                */
+               if (node_to_request(node)->global_seqno &&
+                   i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
+                                     node_to_request(node)->global_seqno))
+                       continue;
+
+               /* Defer (tasklet) submission until after all of our updates. */
+               engine->execlists.queue_priority = prio;
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+       }
+
+       spin_unlock_irq(&engine->timeline.lock);
+}
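The comment above explains why recursion is avoided; the same worklist idea in a stripped-down, self-contained form (user-space sketch with illustrative types and a fixed bound, not the driver's structures):

	#include <stdio.h>

	#define MAX_WORK 64

	struct node {
		int prio;
		int ndeps;
		struct node *deps[4];	/* nodes this one waits on */
	};

	/* Propagate a priority bump through the DAG without recursion by
	 * growing a flat worklist while walking it.  Relies on the same
	 * invariant as above: a dependency never has lower priority than
	 * its waiter, so an already-high node's deps can be skipped.
	 */
	static void bump_priority(struct node *root, int prio)
	{
		struct node *work[MAX_WORK];
		int head = 0, tail = 0, i;

		work[tail++] = root;
		while (head < tail) {
			struct node *n = work[head++];

			if (n->prio >= prio)
				continue;
			n->prio = prio;
			for (i = 0; i < n->ndeps && tail < MAX_WORK; i++)
				work[tail++] = n->deps[i];
		}
	}

	int main(void)
	{
		struct node a = { .prio = 0 }, b = { .prio = 1 }, c = { .prio = 0 };

		c.deps[c.ndeps++] = &b;	/* c waits on b, b waits on a */
		b.deps[b.ndeps++] = &a;
		bump_priority(&c, 5);
		printf("%d %d %d\n", a.prio, b.prio, c.prio);	/* 5 5 5 */
		return 0;
	}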
+
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+       spin_lock(&schedule_lock);
+       __i915_schedule(rq, attr);
+       spin_unlock(&schedule_lock);
+}
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+       struct i915_sched_attr attr;
+
+       GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
+
+       if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+               return;
+
+       spin_lock_bh(&schedule_lock);
+
+       attr = rq->sched.attr;
+       attr.priority |= bump;
+       __i915_schedule(rq, &attr);
+
+       spin_unlock_bh(&schedule_lock);
+}
index 70a42220358d894170e63aa64724f142942ef940..dbe9cb7ecd82928bc83be7b042994a1566724d82 100644 (file)
@@ -8,9 +8,14 @@
 #define _I915_SCHEDULER_H_
 
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 
 #include <uapi/drm/i915_drm.h>
 
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
 enum {
        I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
        I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
        I915_PRIORITY_INVALID = INT_MIN
 };
 
+#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT     ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT        ((u8)BIT(1))
+
 struct i915_sched_attr {
        /**
         * @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
 #define I915_DEPENDENCY_ALLOC BIT(0)
 };
 
+void i915_sched_node_init(struct i915_sched_node *node);
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+                                     struct i915_sched_node *signal,
+                                     struct i915_dependency *dep,
+                                     unsigned long flags);
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+                                  struct i915_sched_node *node,
+                                  struct i915_sched_node *signal);
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+                         struct i915_sched_node *node);
+
+void i915_schedule(struct i915_request *request,
+                  const struct i915_sched_attr *attr);
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+
 #endif /* _I915_SCHEDULER_H_ */
index 58f8d0cc125c0e6e58a30973d8443037208f4bb4..60404dbb2e9fa9f3484989cdfe34bacd202d0ca9 100644 (file)
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
 {
        BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
        BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
-       BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+       BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
        *root = NULL;
 }
 
index a2c2c3ab5fb0ceea22c2c1426b1c307b5d1aa460..ebd71b487220aec95a2bfdeb1c924f0836ae64c1 100644 (file)
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
                        const char *name);
 void i915_timeline_fini(struct i915_timeline *tl);
 
+static inline void
+i915_timeline_set_subclass(struct i915_timeline *timeline,
+                          unsigned int subclass)
+{
+       lockdep_set_subclass(&timeline->lock, subclass);
+
+       /*
+        * Due to an interesting quirk in lockdep's internal debug tracking,
+        * after setting a subclass we must ensure the lock is used. Otherwise,
+        * nr_unused_locks is incremented once too often.
+        */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       local_irq_disable();
+       lock_map_acquire(&timeline->lock.dep_map);
+       lock_map_release(&timeline->lock.dep_map);
+       local_irq_enable();
+#endif
+}
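A plausible caller, shown only to illustrate intent (the subclass value is made up): giving certain timelines their own lockdep subclass lets one timeline lock nest inside another without false-positive deadlock reports.

	i915_timeline_set_subclass(&engine->timeline, 1); /* illustrative value */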
+
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915, const char *name);
 
index 395dd251156833d46a48de3e49163c22a6627bc8..5858a43e19daa908221d36428a8f4a62787ef245 100644 (file)
@@ -68,7 +68,7 @@
 
 /* Note we don't consider signbits :| */
 #define overflows_type(x, T) \
-       (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+       (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
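For example, the rewritten macro still reduces to a size comparison plus a shift test (sketch; unsigned values only, since sign bits are ignored as noted, and the helper name is invented):

	static int check_fits_u32(u64 v)
	{
		if (overflows_type(v, u32))	/* true when bits above 31 are set */
			return -EINVAL;
		return 0;
	}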
 
 #define ptr_mask_bits(ptr, n) ({                                       \
        unsigned long __v = (unsigned long)(ptr);                       \
index 35fce4c88629ea4b61187819194f08767e1bb71e..5b4d78cdb4ca32c4162322b4750e2dec80fe99d1 100644 (file)
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);
 
-       if (GEM_WARN_ON(range_overflows(vma->node.start,
-                                       vma->node.size,
-                                       vma->vm->total)))
+       if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
+                                             vma->node.size,
+                                             vma->vm->total)))
                return -ENODEV;
 
-       if (GEM_WARN_ON(!flags))
+       if (GEM_DEBUG_WARN_ON(!flags))
                return -EINVAL;
 
        bind_flags = 0;
index 13830e43a4d12a8fa59ff4c131aa3cddeff8fe10..01f422df8c230804ad0780e7a638c01b52262bcc 100644 (file)
  *   Jani Nikula <jani.nikula@intel.com>
  */
 
+#include <drm/drm_mipi_dsi.h>
 #include "intel_dsi.h"
 
+static inline int header_credits_available(struct drm_i915_private *dev_priv,
+                                          enum transcoder dsi_trans)
+{
+       return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+               >> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+                                           enum transcoder dsi_trans)
+{
+       return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+               >> FREE_PLOAD_CREDIT_SHIFT;
+}
+
+static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+                                   enum transcoder dsi_trans)
+{
+       if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+                       MAX_HEADER_CREDIT, 100))
+               DRM_ERROR("DSI header credits not released\n");
+}
+
+static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+                                    enum transcoder dsi_trans)
+{
+       if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+                       MAX_PLOAD_CREDIT, 100))
+               DRM_ERROR("DSI payload credits not released\n");
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+       if (port == PORT_A)
+               return TRANSCODER_DSI_0;
+       else
+               return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct mipi_dsi_device *dsi;
+       enum port port;
+       enum transcoder dsi_trans;
+       int ret;
+
+       /* wait for header/payload credits to be released */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               wait_for_header_credits(dev_priv, dsi_trans);
+               wait_for_payload_credits(dev_priv, dsi_trans);
+       }
+
+       /* send nop DCS command */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi = intel_dsi->dsi_hosts[port]->device;
+               dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+               dsi->channel = 0;
+               ret = mipi_dsi_dcs_nop(dsi);
+               if (ret < 0)
+                       DRM_ERROR("error sending DCS NOP command\n");
+       }
+
+       /* wait for header credits to be released */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               wait_for_header_credits(dev_priv, dsi_trans);
+       }
+
+       /* wait for LP TX in progress bit to be cleared */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+                                 LPTX_IN_PROGRESS), 20))
+                       DRM_ERROR("LPTX bit not cleared\n");
+       }
+}
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 tmp;
+       int lane;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               /*
+                * Program voltage swing and pre-emphasis level values as per
+                * table in BSPEC under DDI buffer programming
+                */
+               tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+               tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+               tmp |= SCALING_MODE_SEL(0x2);
+               tmp |= TAP2_DISABLE | TAP3_DISABLE;
+               tmp |= RTERM_SELECT(0x6);
+               I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+
+               tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+               tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+               tmp |= SCALING_MODE_SEL(0x2);
+               tmp |= TAP2_DISABLE | TAP3_DISABLE;
+               tmp |= RTERM_SELECT(0x6);
+               I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+
+               tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+               tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+                        RCOMP_SCALAR_MASK);
+               tmp |= SWING_SEL_UPPER(0x2);
+               tmp |= SWING_SEL_LOWER(0x2);
+               tmp |= RCOMP_SCALAR(0x98);
+               I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+               tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+               tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+                        RCOMP_SCALAR_MASK);
+               tmp |= SWING_SEL_UPPER(0x2);
+               tmp |= SWING_SEL_LOWER(0x2);
+               tmp |= RCOMP_SCALAR(0x98);
+               I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+
+               tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+               tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+                        CURSOR_COEFF_MASK);
+               tmp |= POST_CURSOR_1(0x0);
+               tmp |= POST_CURSOR_2(0x0);
+               tmp |= CURSOR_COEFF(0x3f);
+               I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+
+               for (lane = 0; lane <= 3; lane++) {
+                       /* Bspec: must not use GRP register for write */
+                       tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+                       tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+                                CURSOR_COEFF_MASK);
+                       tmp |= POST_CURSOR_1(0x0);
+                       tmp |= POST_CURSOR_2(0x0);
+                       tmp |= CURSOR_COEFF(0x3f);
+                       I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+               }
+       }
+}
+
 static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,10 +250,553 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
        }
 }
 
-static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 tmp;
+       int lane;
+
+       /* Step 4b(i) set loadgen select for transmit and aux lanes */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+               tmp &= ~LOADGEN_SELECT;
+               I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+               for (lane = 0; lane <= 3; lane++) {
+                       tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+                       tmp &= ~LOADGEN_SELECT;
+                       if (lane != 2)
+                               tmp |= LOADGEN_SELECT;
+                       I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+               }
+       }
+
+       /* Step 4b(ii) set latency optimization for transmit and aux lanes */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+               tmp &= ~FRC_LATENCY_OPTIM_MASK;
+               tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+               I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+               tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+               tmp &= ~FRC_LATENCY_OPTIM_MASK;
+               tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+               I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+       }
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       u32 tmp;
+       enum port port;
+
+       /* clear common keeper enable bit */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+               tmp &= ~COMMON_KEEPER_EN;
+               I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+               tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+               tmp &= ~COMMON_KEEPER_EN;
+               I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+       }
+
+       /*
+        * Set SUS Clock Config bitfield to 11b
+        * Note: loadgen select program is done
+        * as part of lane phy sequence configuration
+        */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_CL_DW5(port));
+               tmp |= SUS_CLOCK_CONFIG;
+               I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+       }
+
+       /* Clear training enable to change swing values */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+               tmp &= ~TX_TRAINING_EN;
+               I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+               tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+               tmp &= ~TX_TRAINING_EN;
+               I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+       }
+
+       /* Program swing and de-emphasis */
+       dsi_program_swing_and_deemphasis(encoder);
+
+       /* Set training enable to trigger update */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+               tmp |= TX_TRAINING_EN;
+               I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+               tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+               tmp |= TX_TRAINING_EN;
+               I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+       }
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       u32 tmp;
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(DDI_BUF_CTL(port));
+               tmp |= DDI_BUF_CTL_ENABLE;
+               I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+               if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+                                 DDI_BUF_IS_IDLE),
+                                 500))
+                       DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+       }
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       u32 tmp;
+       enum port port;
+
+       /* Program T-INIT master registers */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+               tmp &= ~MASTER_INIT_TIMER_MASK;
+               tmp |= intel_dsi->init_count;
+               I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+       }
+
+       /* Program DPHY clock lanes timings */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+               /* shadow register inside display core */
+               I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+       }
+
+       /* Program DPHY data lanes timings */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+                          intel_dsi->dphy_data_lane_reg);
+
+               /* shadow register inside display core */
+               I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+                          intel_dsi->dphy_data_lane_reg);
+       }
+
+       /*
+        * If the DSI link is operating at or below 800 MHz,
+        * TA_SURE should be overridden and programmed to
+        * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+        * leave all fields at HW default values.
+        */
+       if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+                       tmp &= ~TA_SURE_MASK;
+                       tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+                       I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+                       /* shadow register inside display core */
+                       tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+                       tmp &= ~TA_SURE_MASK;
+                       tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+                       I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+               }
+       }
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 tmp;
+       enum port port;
+       enum transcoder dsi_trans;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+
+               if (intel_dsi->eotp_pkt)
+                       tmp &= ~EOTP_DISABLED;
+               else
+                       tmp |= EOTP_DISABLED;
+
+               /* enable link calibration if freq >= 1.5 Gbps */
+               if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+                       tmp &= ~LINK_CALIBRATION_MASK;
+                       tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+               }
+
+               /* configure continuous clock */
+               tmp &= ~CONTINUOUS_CLK_MASK;
+               if (intel_dsi->clock_stop)
+                       tmp |= CLK_ENTER_LP_AFTER_DATA;
+               else
+                       tmp |= CLK_HS_CONTINUOUS;
+
+               /* configure buffer threshold limit to minimum */
+               tmp &= ~PIX_BUF_THRESHOLD_MASK;
+               tmp |= PIX_BUF_THRESHOLD_1_4;
+
+               /* set virtual channel to '0' */
+               tmp &= ~PIX_VIRT_CHAN_MASK;
+               tmp |= PIX_VIRT_CHAN(0);
+
+               /* program BGR transmission */
+               if (intel_dsi->bgr_enabled)
+                       tmp |= BGR_TRANSMISSION;
+
+               /* select pixel format */
+               tmp &= ~PIX_FMT_MASK;
+               switch (intel_dsi->pixel_format) {
+               default:
+                       MISSING_CASE(intel_dsi->pixel_format);
+                       /* fallthrough */
+               case MIPI_DSI_FMT_RGB565:
+                       tmp |= PIX_FMT_RGB565;
+                       break;
+               case MIPI_DSI_FMT_RGB666_PACKED:
+                       tmp |= PIX_FMT_RGB666_PACKED;
+                       break;
+               case MIPI_DSI_FMT_RGB666:
+                       tmp |= PIX_FMT_RGB666_LOOSE;
+                       break;
+               case MIPI_DSI_FMT_RGB888:
+                       tmp |= PIX_FMT_RGB888;
+                       break;
+               }
+
+               /* program DSI operation mode */
+               if (is_vid_mode(intel_dsi)) {
+                       tmp &= ~OP_MODE_MASK;
+                       switch (intel_dsi->video_mode_format) {
+                       default:
+                               MISSING_CASE(intel_dsi->video_mode_format);
+                               /* fallthrough */
+                       case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+                               tmp |= VIDEO_MODE_SYNC_EVENT;
+                               break;
+                       case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+                               tmp |= VIDEO_MODE_SYNC_PULSE;
+                               break;
+                       }
+               }
+
+               I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+       }
+
+       /* enable port sync mode if dual link */
+       if (intel_dsi->dual_link) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       dsi_trans = dsi_port_to_transcoder(port);
+                       tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+                       tmp |= PORT_SYNC_MODE_ENABLE;
+                       I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+               }
+
+               /* TODO: configure DSS_CTL1 */
+       }
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+
+               /* select data lane width */
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+               tmp &= ~DDI_PORT_WIDTH_MASK;
+               tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+               /* select input pipe */
+               tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+               switch (pipe) {
+               default:
+                       MISSING_CASE(pipe);
+                       /* fallthrough */
+               case PIPE_A:
+                       tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+                       break;
+               case PIPE_B:
+                       tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+                       break;
+               case PIPE_C:
+                       tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+                       break;
+               }
+
+               /* enable DDI buffer */
+               tmp |= TRANS_DDI_FUNC_ENABLE;
+               I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+       }
+
+       /* wait for link ready */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
+                               LINK_READY), 2500))
+                       DRM_ERROR("DSI link not ready\n");
+       }
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       const struct drm_display_mode *adjusted_mode =
+                                       &pipe_config->base.adjusted_mode;
+       enum port port;
+       enum transcoder dsi_trans;
+       /* horizontal timings */
+       u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+       u16 hfront_porch, hback_porch;
+       /* vertical timings */
+       u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+
+       hactive = adjusted_mode->crtc_hdisplay;
+       htotal = adjusted_mode->crtc_htotal;
+       hsync_start = adjusted_mode->crtc_hsync_start;
+       hsync_end = adjusted_mode->crtc_hsync_end;
+       hsync_size  = hsync_end - hsync_start;
+       hfront_porch = (adjusted_mode->crtc_hsync_start -
+                       adjusted_mode->crtc_hdisplay);
+       hback_porch = (adjusted_mode->crtc_htotal -
+                      adjusted_mode->crtc_hsync_end);
+       vactive = adjusted_mode->crtc_vdisplay;
+       vtotal = adjusted_mode->crtc_vtotal;
+       vsync_start = adjusted_mode->crtc_vsync_start;
+       vsync_end = adjusted_mode->crtc_vsync_end;
+       vsync_shift = hsync_start - htotal / 2;
+
+       if (intel_dsi->dual_link) {
+               hactive /= 2;
+               if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+                       hactive += intel_dsi->pixel_overlap;
+               htotal /= 2;
+       }
+
+       /* minimum hactive as per bspec: 256 pixels */
+       if (adjusted_mode->crtc_hdisplay < 256)
+               DRM_ERROR("hactive is less than 256 pixels\n");
+
+       /* if RGB666 format, then hactive must be multiple of 4 pixels */
+       if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+               DRM_ERROR("hactive pixels are not multiple of 4\n");
+
+       /* program TRANS_HTOTAL register */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               I915_WRITE(HTOTAL(dsi_trans),
+                          (hactive - 1) | ((htotal - 1) << 16));
+       }
+
+       /* TRANS_HSYNC register to be programmed only for video mode */
+       if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+               if (intel_dsi->video_mode_format ==
+                   VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+                       /* BSPEC: hsync size should be at least 16 pixels */
+                       if (hsync_size < 16)
+                               DRM_ERROR("hsync size < 16 pixels\n");
+               }
+
+               if (hback_porch < 16)
+                       DRM_ERROR("hback porch < 16 pixels\n");
+
+               if (intel_dsi->dual_link) {
+                       hsync_start /= 2;
+                       hsync_end /= 2;
+               }
+
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       dsi_trans = dsi_port_to_transcoder(port);
+                       I915_WRITE(HSYNC(dsi_trans),
+                                  (hsync_start - 1) | ((hsync_end - 1) << 16));
+               }
+       }
+
+       /* program TRANS_VTOTAL register */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               /*
+                * FIXME: Programming this assuming progressive mode, since
+                * the interlaced-vs-progressive info from the VBT is not
+                * saved inside struct drm_display_mode.
+                * For interlaced mode: program the required value minus 2.
+                */
+               I915_WRITE(VTOTAL(dsi_trans),
+                          (vactive - 1) | ((vtotal - 1) << 16));
+       }
+
+       if (vsync_end < vsync_start || vsync_end > vtotal)
+               DRM_ERROR("Invalid vsync_end value\n");
+
+       if (vsync_start < vactive)
+               DRM_ERROR("vsync_start less than vactive\n");
+
+       /* program TRANS_VSYNC register */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               I915_WRITE(VSYNC(dsi_trans),
+                          (vsync_start - 1) | ((vsync_end - 1) << 16));
+       }
+
+       /*
+        * FIXME: VSYNCSHIFT needs to be programmed only for interlaced
+        * modes. Add the check here once the interlaced info is
+        * available, as described above.
+        * Program the TRANS_VSYNCSHIFT register.
+        */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+       }
+}
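
A quick sanity check of the porch arithmetic above, using a hypothetical mode (standard 1080p timings, not values taken from this patch):

    /* Assumed mode: crtc_hdisplay = 1920, crtc_hsync_start = 2008,
     * crtc_hsync_end = 2052, crtc_htotal = 2200. */
    hsync_size   = 2052 - 2008;  /*  44 pixels */
    hfront_porch = 2008 - 1920;  /*  88 pixels */
    hback_porch  = 2200 - 2052;  /* 148 pixels */

With DSI_DUAL_LINK_FRONT_BACK, each link then drives hactive/2 + pixel_overlap active pixels out of htotal/2 total pixels, which is exactly the halving performed before the registers are written.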
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       enum transcoder dsi_trans;
+       u32 tmp;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               tmp = I915_READ(PIPECONF(dsi_trans));
+               tmp |= PIPECONF_ENABLE;
+               I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+               /* wait for transcoder to be enabled */
+               if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+                                           I965_PIPECONF_ACTIVE,
+                                           I965_PIPECONF_ACTIVE, 10))
+                       DRM_ERROR("DSI transcoder not enabled\n");
+       }
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       enum transcoder dsi_trans;
+       u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+       /*
+        * Escape clock count calculation:
+        * BYTE_CLK_COUNT = TIME_NS / (8 * UI)
+        * UI (nsec) = (10^6) / Bitrate
+        * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6) / Bitrate
+        * ESCAPE_CLK_COUNT = TIME_NS / ESC_CLK_NS
+        */
+       divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+       mul = 8 * 1000000;
+       hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+                                    divisor);
+       lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+       ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+
+               /* program hs_tx_timeout */
+               tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+               tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+               tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+               I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+
+               /* FIXME: DSI_CALIB_TO */
+
+               /* program lp_rx_host timeout */
+               tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+               tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+               tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+               I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+               /* FIXME: DSI_PWAIT_TO */
+
+               /* program turn around timeout */
+               tmp = I915_READ(DSI_TA_TO(dsi_trans));
+               tmp &= ~TA_TIMEOUT_VALUE_MASK;
+               tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+               I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+       }
+}
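
To illustrate the escape-clock conversion with made-up numbers (the real inputs come from the VBT, and the units are whatever intel_dsi_tlpx_ns() and intel_dsi_bitrate() return): with tlpx = 50, bitrate = 1000 and a hs_tx_timeout of 25 byte clocks,

    DIV_ROUND_UP(25 * 8 * 1000000, 50 * 1000 * 1000)
        = DIV_ROUND_UP(200000000, 50000000)
        = 4 escape clocks.

DIV_ROUND_UP() rounds up, so a partially elapsed escape clock still counts toward the timeout.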
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *pipe_config)
 {
        /* step 4a: power up all lanes of the DDI used by DSI */
        gen11_dsi_power_up_lanes(encoder);
+
+       /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+       gen11_dsi_config_phy_lanes_sequence(encoder);
+
+       /* step 4c: configure voltage swing and skew */
+       gen11_dsi_voltage_swing_program_seq(encoder);
+
+       /* enable DDI buffer */
+       gen11_dsi_enable_ddi_buffer(encoder);
+
+       /* setup D-PHY timings */
+       gen11_dsi_setup_dphy_timings(encoder);
+
+       /* step 4h: setup DSI protocol timeouts */
+       gen11_dsi_setup_timeouts(encoder);
+
+       /* Step (4h, 4i, 4j, 4k): Configure transcoder */
+       gen11_dsi_configure_transcoder(encoder, pipe_config);
+}
+
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct mipi_dsi_device *dsi;
+       enum port port;
+       enum transcoder dsi_trans;
+       u32 tmp;
+       int ret;
+
+       /* set maximum return packet size */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+
+               /*
+                * FIXME: This uses the number of DWs currently in the payload
+                * receive queue. This is probably not what we want here.
+                */
+               tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+               tmp &= NUMBER_RX_PLOAD_DW_MASK;
+               /* multiply "Number Rx Payload DW" by 4 to get the value in bytes */
+               tmp = tmp * 4;
+               dsi = intel_dsi->dsi_hosts[port]->device;
+               ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+               if (ret < 0)
+                       DRM_ERROR("error setting max return pkt size %d\n", tmp);
+       }
+
+       /* panel power on related mipi dsi vbt sequences */
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+       intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+       /* ensure all panel commands dispatched before enabling transcoder */
+       wait_for_cmds_dispatched_to_panel(encoder);
 }
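
Illustrative arithmetic for the packet-size clamp above (assumed register value, not taken from this patch): if the NUMBER_RX_PLOAD_DW field reads back 16, the payload receive queue holds 16 dwords of 4 bytes each, so mipi_dsi_set_maximum_return_packet_size() is called with 16 * 4 = 64 bytes.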
 
 static void __attribute__((unused))
@@ -116,6 +804,8 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
                     const struct intel_crtc_state *pipe_config,
                     const struct drm_connector_state *conn_state)
 {
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
        /* step2: enable IO power */
        gen11_dsi_enable_io_power(encoder);
 
@@ -123,5 +813,169 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
        gen11_dsi_program_esc_clk_div(encoder);
 
        /* step4: enable DSI port and DPHY */
-       gen11_dsi_enable_port_and_phy(encoder);
+       gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+       /* step5: program and powerup panel */
+       gen11_dsi_powerup_panel(encoder);
+
+       /* step6c: configure transcoder timings */
+       gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+
+       /* step6d: enable dsi transcoder */
+       gen11_dsi_enable_transcoder(encoder);
+
+       /* step7: enable backlight */
+       intel_panel_enable_backlight(pipe_config, conn_state);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       enum transcoder dsi_trans;
+       u32 tmp;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+
+               /* disable transcoder */
+               tmp = I915_READ(PIPECONF(dsi_trans));
+               tmp &= ~PIPECONF_ENABLE;
+               I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+               /* wait for transcoder to be disabled */
+               if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+                                           I965_PIPECONF_ACTIVE, 0, 50))
+                       DRM_ERROR("DSI transcoder not disabled\n");
+       }
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+       /* ensure cmds dispatched to panel */
+       wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       enum transcoder dsi_trans;
+       u32 tmp;
+
+       /* put dsi link in ULPS */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+               tmp |= LINK_ENTER_ULPS;
+               tmp &= ~LINK_ULPS_TYPE_LP11;
+               I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+
+               if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
+                               LINK_IN_ULPS),
+                               10))
+                       DRM_ERROR("DSI link not in ULPS\n");
+       }
+
+       /* disable ddi function */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               dsi_trans = dsi_port_to_transcoder(port);
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+               tmp &= ~TRANS_DDI_FUNC_ENABLE;
+               I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+       }
+
+       /* disable port sync mode if dual link */
+       if (intel_dsi->dual_link) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       dsi_trans = dsi_port_to_transcoder(port);
+                       tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+                       tmp &= ~PORT_SYNC_MODE_ENABLE;
+                       I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+               }
+       }
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       u32 tmp;
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(DDI_BUF_CTL(port));
+               tmp &= ~DDI_BUF_CTL_ENABLE;
+               I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+               if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+                                DDI_BUF_IS_IDLE),
+                                8))
+                       DRM_ERROR("DDI port:%c buffer not idle\n",
+                                 port_name(port));
+       }
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 tmp;
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
+
+       if (intel_dsi->dual_link)
+               intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+
+       /* set mode to DDI */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+               tmp &= ~COMBO_PHY_MODE_DSI;
+               I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+       }
+}
+
+static void __attribute__((unused)) gen11_dsi_disable(
+                       struct intel_encoder *encoder,
+                       const struct intel_crtc_state *old_crtc_state,
+                       const struct drm_connector_state *old_conn_state)
+{
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+       /* step1: turn off backlight */
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+       intel_panel_disable_backlight(old_conn_state);
+
+       /* step2d,e: disable transcoder and wait */
+       gen11_dsi_disable_transcoder(encoder);
+
+       /* step2f,g: powerdown panel */
+       gen11_dsi_powerdown_panel(encoder);
+
+       /* step2h,i,j: deconfigure transcoder */
+       gen11_dsi_deconfigure_transcoder(encoder);
+
+       /* step3: disable port */
+       gen11_dsi_disable_port(encoder);
+
+       /* step4: disable IO power */
+       gen11_dsi_disable_io_power(encoder);
+}
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       if (!intel_bios_is_dsi_present(dev_priv, &port))
+               return;
 }
index b04952bacf77c01896ffdd910eed3d692e538d10..a5a2c8fe58a760a63703892018874f34a98e4d1e 100644 (file)
@@ -203,6 +203,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
        drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+                                     int num_scalers_need, struct intel_crtc *intel_crtc,
+                                     const char *name, int idx,
+                                     struct intel_plane_state *plane_state,
+                                     int *scaler_id)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       int j;
+       u32 mode;
+
+       if (*scaler_id < 0) {
+               /* find a free scaler */
+               for (j = 0; j < intel_crtc->num_scalers; j++) {
+                       if (scaler_state->scalers[j].in_use)
+                               continue;
+
+                       *scaler_id = j;
+                       scaler_state->scalers[*scaler_id].in_use = 1;
+                       break;
+               }
+       }
+
+       if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+               return;
+
+       /* set scaler mode */
+       if (plane_state && plane_state->base.fb &&
+           plane_state->base.fb->format->is_yuv &&
+           plane_state->base.fb->format->num_planes > 1) {
+               if (IS_GEN9(dev_priv) &&
+                   !IS_GEMINILAKE(dev_priv)) {
+                       mode = SKL_PS_SCALER_MODE_NV12;
+               } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+                       /*
+                        * On gen11+'s HDR planes we only use the scaler for
+                        * scaling. They have a dedicated chroma upsampler, so
+                        * we don't need the scaler to upsample the UV plane.
+                        */
+                       mode = PS_SCALER_MODE_NORMAL;
+               } else {
+                       mode = PS_SCALER_MODE_PLANAR;
+
+                       if (plane_state->linked_plane)
+                               mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+               }
+       } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+               mode = PS_SCALER_MODE_NORMAL;
+       } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+               /*
+               /*
+                * When only 1 scaler is in use on a pipe with 2 scalers,
+                * scaler 0 operates in high quality (HQ) mode.
+                * In this case use scaler 0 to take advantage of HQ mode.
+                */
+               scaler_state->scalers[*scaler_id].in_use = 0;
+               *scaler_id = 0;
+               scaler_state->scalers[0].in_use = 1;
+               mode = SKL_PS_SCALER_MODE_HQ;
+       } else {
+               mode = SKL_PS_SCALER_MODE_DYN;
+       }
+
+       DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+                     intel_crtc->pipe, *scaler_id, name, idx);
+       scaler_state->scalers[*scaler_id].mode = mode;
+}
+
 /**
  * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
  * @dev_priv: i915 device
@@ -232,7 +298,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
        struct drm_atomic_state *drm_state = crtc_state->base.state;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
        int num_scalers_need;
-       int i, j;
+       int i;
 
        num_scalers_need = hweight32(scaler_state->scaler_users);
 
@@ -304,59 +370,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
                        idx = plane->base.id;
 
                        /* plane on different crtc cannot be a scaler user of this crtc */
-                       if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+                       if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
                                continue;
-                       }
 
                        plane_state = intel_atomic_get_new_plane_state(intel_state,
                                                                       intel_plane);
                        scaler_id = &plane_state->scaler_id;
                }
 
-               if (*scaler_id < 0) {
-                       /* find a free scaler */
-                       for (j = 0; j < intel_crtc->num_scalers; j++) {
-                               if (!scaler_state->scalers[j].in_use) {
-                                       scaler_state->scalers[j].in_use = 1;
-                                       *scaler_id = j;
-                                       DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-                                               intel_crtc->pipe, *scaler_id, name, idx);
-                                       break;
-                               }
-                       }
-               }
-
-               if (WARN_ON(*scaler_id < 0)) {
-                       DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
-                       continue;
-               }
-
-               /* set scaler mode */
-               if ((INTEL_GEN(dev_priv) >= 9) &&
-                   plane_state && plane_state->base.fb &&
-                   plane_state->base.fb->format->format ==
-                   DRM_FORMAT_NV12) {
-                       if (INTEL_GEN(dev_priv) == 9 &&
-                           !IS_GEMINILAKE(dev_priv) &&
-                           !IS_SKYLAKE(dev_priv))
-                               scaler_state->scalers[*scaler_id].mode =
-                                       SKL_PS_SCALER_MODE_NV12;
-                       else
-                               scaler_state->scalers[*scaler_id].mode =
-                                       PS_SCALER_MODE_PLANAR;
-               } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
-                       /*
-                        * when only 1 scaler is in use on either pipe A or B,
-                        * scaler 0 operates in high quality (HQ) mode.
-                        * In this case use scaler 0 to take advantage of HQ mode
-                        */
-                       *scaler_id = 0;
-                       scaler_state->scalers[0].in_use = 1;
-                       scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
-                       scaler_state->scalers[1].in_use = 0;
-               } else {
-                       scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
-               }
+               intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+                                         intel_crtc, name, idx,
+                                         plane_state, scaler_id);
        }
 
        return 0;
index aabebe0d2e9b48ab0e23586eca62dadee7f93a35..905f8ef3ba4fce9fb7a34e757e168c4440c63774 100644 (file)
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-/**
- * intel_create_plane_state - create plane state object
- * @plane: drm plane
- *
- * Allocates a fresh plane state for the given plane and sets some of
- * the state values to sensible initial values.
- *
- * Returns: A newly allocated plane state, or NULL on failure
- */
-struct intel_plane_state *
-intel_create_plane_state(struct drm_plane *plane)
+struct intel_plane *intel_plane_alloc(void)
 {
-       struct intel_plane_state *state;
+       struct intel_plane_state *plane_state;
+       struct intel_plane *plane;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
-               return NULL;
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
 
-       state->base.plane = plane;
-       state->base.rotation = DRM_MODE_ROTATE_0;
+       plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       if (!plane_state) {
+               kfree(plane);
+               return ERR_PTR(-ENOMEM);
+       }
 
-       return state;
+       __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+       plane_state->scaler_id = -1;
+
+       return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+       intel_plane_destroy_state(&plane->base, plane->base.state);
+       kfree(plane);
 }
 
 /**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int ret;
 
+       crtc_state->active_planes &= ~BIT(intel_plane->id);
+       crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+       intel_state->base.visible = false;
+
+       /* No CRTC in either the old or the new state: nothing to check. */
        if (!intel_state->base.crtc && !old_plane_state->base.crtc)
                return 0;
 
-       intel_state->base.visible = false;
        ret = intel_plane->check_plane(crtc_state, intel_state);
        if (ret)
                return ret;
@@ -128,13 +135,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        /* FIXME pre-g4x don't work like this */
        if (state->visible)
                crtc_state->active_planes |= BIT(intel_plane->id);
-       else
-               crtc_state->active_planes &= ~BIT(intel_plane->id);
 
        if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
                crtc_state->nv12_planes |= BIT(intel_plane->id);
-       else
-               crtc_state->nv12_planes &= ~BIT(intel_plane->id);
 
        return intel_plane_atomic_calc_changes(old_crtc_state,
                                               &crtc_state->base,
@@ -152,6 +155,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        const struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
 
+       new_plane_state->visible = false;
        if (!crtc)
                return 0;
 
@@ -164,29 +168,52 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
                                                   to_intel_plane_state(new_plane_state));
 }
 
-static void intel_plane_atomic_update(struct drm_plane *plane,
-                                     struct drm_plane_state *old_state)
+void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
+                                struct intel_crtc *crtc,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct intel_crtc_state *new_crtc_state)
 {
-       struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       const struct intel_plane_state *new_plane_state =
-               intel_atomic_get_new_plane_state(state, intel_plane);
-       struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
-
-       if (new_plane_state->base.visible) {
-               const struct intel_crtc_state *new_crtc_state =
-                       intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
-
-               trace_intel_update_plane(plane,
-                                        to_intel_crtc(crtc));
-
-               intel_plane->update_plane(intel_plane,
-                                         new_crtc_state, new_plane_state);
-       } else {
-               trace_intel_disable_plane(plane,
-                                         to_intel_crtc(crtc));
-
-               intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+       struct intel_plane_state *new_plane_state;
+       struct intel_plane *plane;
+       u32 update_mask;
+       int i;
+
+       update_mask = old_crtc_state->active_planes;
+       update_mask |= new_crtc_state->active_planes;
+
+       for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
+               if (crtc->pipe != plane->pipe ||
+                   !(update_mask & BIT(plane->id)))
+                       continue;
+
+               if (new_plane_state->base.visible) {
+                       trace_intel_update_plane(&plane->base, crtc);
+
+                       plane->update_plane(plane, new_crtc_state, new_plane_state);
+               } else if (new_plane_state->slave) {
+                       struct intel_plane *master =
+                               new_plane_state->linked_plane;
+
+                       /*
+                        * We update the slave plane from this function because
+                        * programming it from the master plane's update_plane
+                        * callback runs into issues when the Y plane is
+                        * reassigned, disabled or used by a different plane.
+                        *
+                        * The slave plane is updated with the master plane's
+                        * plane_state.
+                        */
+                       new_plane_state =
+                               intel_atomic_get_new_plane_state(old_state, master);
+
+                       trace_intel_update_plane(&plane->base, crtc);
+
+                       plane->update_slave(plane, new_crtc_state, new_plane_state);
+               } else {
+                       trace_intel_disable_plane(&plane->base, crtc);
+
+                       plane->disable_plane(plane, crtc);
+               }
        }
 }
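
A note on the mask logic above: update_mask is the union of the old and new active_planes bitmasks, so a plane that was active before the commit but not after still enters the loop and takes the disable_plane() branch, while a newly visible plane takes the update_plane() branch. For example, with hypothetical states old active_planes = BIT(PLANE_PRIMARY) and new active_planes = BIT(PLANE_SPRITE0), update_mask covers both planes: the primary is disabled and the sprite is programmed in the same pass.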
 
@@ -194,7 +221,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
        .prepare_fb = intel_prepare_plane_fb,
        .cleanup_fb = intel_cleanup_plane_fb,
        .atomic_check = intel_plane_atomic_check,
-       .atomic_update = intel_plane_atomic_update,
 };
 
 /**
index 769f3f5866611174cbabeca5e4d1fbb0711b9b86..ae55a6865d5cca98f8738cbf8db4cffe5039bfa2 100644 (file)
@@ -144,26 +144,43 @@ static const struct {
 /* HDMI N/CTS table */
 #define TMDS_297M 297000
 #define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
 static const struct {
        int sample_rate;
        int clock;
        int n;
        int cts;
 } hdmi_aud_ncts[] = {
-       { 44100, TMDS_296M, 4459, 234375 },
-       { 44100, TMDS_297M, 4704, 247500 },
-       { 48000, TMDS_296M, 5824, 281250 },
-       { 48000, TMDS_297M, 5120, 247500 },
        { 32000, TMDS_296M, 5824, 421875 },
        { 32000, TMDS_297M, 3072, 222750 },
+       { 32000, TMDS_593M, 5824, 843750 },
+       { 32000, TMDS_594M, 3072, 445500 },
+       { 44100, TMDS_296M, 4459, 234375 },
+       { 44100, TMDS_297M, 4704, 247500 },
+       { 44100, TMDS_593M, 8918, 937500 },
+       { 44100, TMDS_594M, 9408, 990000 },
        { 88200, TMDS_296M, 8918, 234375 },
        { 88200, TMDS_297M, 9408, 247500 },
-       { 96000, TMDS_296M, 11648, 281250 },
-       { 96000, TMDS_297M, 10240, 247500 },
+       { 88200, TMDS_593M, 17836, 937500 },
+       { 88200, TMDS_594M, 18816, 990000 },
        { 176400, TMDS_296M, 17836, 234375 },
        { 176400, TMDS_297M, 18816, 247500 },
+       { 176400, TMDS_593M, 35672, 937500 },
+       { 176400, TMDS_594M, 37632, 990000 },
+       { 48000, TMDS_296M, 5824, 281250 },
+       { 48000, TMDS_297M, 5120, 247500 },
+       { 48000, TMDS_593M, 5824, 562500 },
+       { 48000, TMDS_594M, 6144, 594000 },
+       { 96000, TMDS_296M, 11648, 281250 },
+       { 96000, TMDS_297M, 10240, 247500 },
+       { 96000, TMDS_593M, 11648, 562500 },
+       { 96000, TMDS_594M, 12288, 594000 },
        { 192000, TMDS_296M, 23296, 281250 },
        { 192000, TMDS_297M, 20480, 247500 },
+       { 192000, TMDS_593M, 23296, 562500 },
+       { 192000, TMDS_594M, 24576, 594000 },
 };
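
The new 593/594 MHz entries can be sanity-checked against the HDMI audio clock regeneration relationship f_s = (f_TMDS * N) / (128 * CTS). For the 48 kHz row at TMDS_594M: (594000000 * 6144) / (128 * 594000) = 48000 Hz, as expected.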
 
 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
@@ -912,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
        if (WARN_ON(acomp->base.ops || acomp->base.dev))
                return -EEXIST;
 
+       if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+               return -ENOMEM;
+
        drm_modeset_lock_all(&dev_priv->drm);
        acomp->base.ops = &i915_audio_component_ops;
        acomp->base.dev = i915_kdev;
@@ -935,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
        acomp->base.dev = NULL;
        dev_priv->audio_component = NULL;
        drm_modeset_unlock_all(&dev_priv->drm);
+
+       device_link_remove(hda_kdev, i915_kdev);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
index 1faa494e2bc91a245861135ab5de015a7a6810ff..0694aa8bb9bcaa55e505c9678e857a00abd21dd5 100644 (file)
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
                intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
        dev_priv->vbt.display_clock_mode = general->display_clock_mode;
        dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+       if (bdb->version >= 181) {
+               dev_priv->vbt.orientation = general->rotate_180 ?
+                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+                       DRM_MODE_PANEL_ORIENTATION_NORMAL;
+       } else {
+               dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+       }
        DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
                      dev_priv->vbt.int_tv_support,
                      dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 
        parse_dsi_backlight_ports(dev_priv, bdb->version, port);
 
+       /* FIXME is the 90 vs. 270 correct? */
+       switch (config->rotation) {
+       case ENABLE_ROTATION_0:
+               /*
+                * Most (all?) VBTs claim 0 degrees despite having
+                * an upside down panel, thus we do not trust this.
+                */
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+               break;
+       case ENABLE_ROTATION_90:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+               break;
+       case ENABLE_ROTATION_180:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+               break;
+       case ENABLE_ROTATION_270:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+               break;
+       }
+
        /* We have mandatory mipi config blocks. Initialize as generic panel */
        dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 
                dvo_port = child->dvo_port;
 
-               switch (dvo_port) {
-               case DVO_PORT_MIPIA:
-               case DVO_PORT_MIPIC:
+               if (dvo_port == DVO_PORT_MIPIA ||
+                   (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
+                   (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
                        if (port)
                                *port = dvo_port - DVO_PORT_MIPIA;
                        return true;
-               case DVO_PORT_MIPIB:
-               case DVO_PORT_MIPID:
+               } else if (dvo_port == DVO_PORT_MIPIB ||
+                          dvo_port == DVO_PORT_MIPIC ||
+                          dvo_port == DVO_PORT_MIPID) {
                        DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
                                      port_name(dvo_port - DVO_PORT_MIPIA));
-                       break;
                }
        }
 
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
 
        return false;
 }
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+                                  enum port port)
+{
+       const struct ddi_vbt_port_info *info =
+               &dev_priv->vbt.ddi_port_info[port];
+       enum aux_ch aux_ch;
+
+       if (!info->alternate_aux_channel) {
+               aux_ch = (enum aux_ch)port;
+
+               DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+                             aux_ch_name(aux_ch), port_name(port));
+               return aux_ch;
+       }
+
+       switch (info->alternate_aux_channel) {
+       case DP_AUX_A:
+               aux_ch = AUX_CH_A;
+               break;
+       case DP_AUX_B:
+               aux_ch = AUX_CH_B;
+               break;
+       case DP_AUX_C:
+               aux_ch = AUX_CH_C;
+               break;
+       case DP_AUX_D:
+               aux_ch = AUX_CH_D;
+               break;
+       case DP_AUX_E:
+               aux_ch = AUX_CH_E;
+               break;
+       case DP_AUX_F:
+               aux_ch = AUX_CH_F;
+               break;
+       default:
+               MISSING_CASE(info->alternate_aux_channel);
+               aux_ch = AUX_CH_A;
+               break;
+       }
+
+       DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+                     aux_ch_name(aux_ch), port_name(port));
+
+       return aux_ch;
+}
index 29075c763428055ddb3625a80b59643e694f3d76..25e3aba9cded6e45f3751039a3f7bb20845dd7b8 100644 (file)
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
 static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
                                     int pixel_rate)
 {
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return DIV_ROUND_UP(pixel_rate, 2);
-       else if (IS_GEMINILAKE(dev_priv))
-               /*
-                * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
-                * as a temporary workaround. Use a higher cdclk instead. (Note that
-                * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
-                * cdclk.)
-                */
-               return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
        else if (IS_GEN9(dev_priv) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 {
        int max_cdclk_freq = dev_priv->max_cdclk_freq;
 
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return 2 * max_cdclk_freq;
-       else if (IS_GEMINILAKE(dev_priv))
-               /*
-                * FIXME: Limiting to 99% as a temporary workaround. See
-                * intel_min_cdclk() for details.
-                */
-               return 2 * max_cdclk_freq * 99 / 100;
        else if (IS_GEN9(dev_priv) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return max_cdclk_freq;
@@ -2674,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
                fraction = 200;
        }
 
-       rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
-       if (fraction)
-               rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
-                                                           fraction) - 1);
-
-       I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
-       return divider + fraction;
-}
+       rawclk = CNP_RAWCLK_DIV(divider / 1000);
+       if (fraction) {
+               int numerator = 1;
 
-static int icp_rawclk(struct drm_i915_private *dev_priv)
-{
-       u32 rawclk;
-       int divider, numerator, denominator, frequency;
-
-       if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
-               frequency = 24000;
-               divider = 23;
-               numerator = 0;
-               denominator = 0;
-       } else {
-               frequency = 19200;
-               divider = 18;
-               numerator = 1;
-               denominator = 4;
+               rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+                                                          fraction) - 1);
+               if (HAS_PCH_ICP(dev_priv))
+                       rawclk |= ICP_RAWCLK_NUM(numerator);
        }
 
-       rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
-                ICP_RAWCLK_DEN(denominator);
-
        I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
-       return frequency;
+       return divider + fraction;
 }
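
A worked example for the fractional rawclk encoding above, assuming the 19.2 MHz branch sets divider = 19000 alongside the fraction = 200 visible in this hunk (both in kHz): CNP_RAWCLK_DIV(19000 / 1000) programs an integer divider of 19, CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(1 * 1000, 200) - 1) programs a denominator field of 4 (a fractional step of 1/5 MHz = 0.2 MHz), and on ICP the numerator field is set to 1, for an effective 19 + 0.2 = 19.2 MHz. The function returns 19000 + 200 = 19200 kHz.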
 
 static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2754,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
-       if (HAS_PCH_ICP(dev_priv))
-               dev_priv->rawclk_freq = icp_rawclk(dev_priv);
-       else if (HAS_PCH_CNP(dev_priv))
+       if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
                dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
        else if (HAS_PCH_SPLIT(dev_priv))
                dev_priv->rawclk_freq = pch_rawclk(dev_priv);
index c6a7beabd58d1f636af7f3fb53a16214b4a7e6a1..5127da286a2b4f61ca5a32ce00220e01a9397f93 100644 (file)
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
        if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
                limited_color_range = intel_crtc_state->limited_color_range;
 
-       if (intel_crtc_state->ycbcr420) {
+       if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+           intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
                ilk_load_ycbcr_conversion_matrix(intel_crtc);
                return;
        } else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644 (file)
index 0000000..3d0271c
--- /dev/null
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+       for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)  \
+               for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+       for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+               for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+enum {
+       PROCMON_0_85V_DOT_0,
+       PROCMON_0_95V_DOT_0,
+       PROCMON_0_95V_DOT_1,
+       PROCMON_1_05V_DOT_0,
+       PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+       u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+       [PROCMON_0_85V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+       [PROCMON_0_95V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+       [PROCMON_0_95V_DOT_1] =
+               { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+       [PROCMON_1_05V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+       [PROCMON_1_05V_DOT_1] =
+               { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we use the ICL macros even though the function has
+ * CNL in its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+       const struct cnl_procmon *procmon;
+       u32 val;
+
+       val = I915_READ(ICL_PORT_COMP_DW3(port));
+       switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+       default:
+               MISSING_CASE(val);
+               /* fall through */
+       case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+               break;
+       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+               break;
+       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+               break;
+       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+               break;
+       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+               break;
+       }
+
+       return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+                                      enum port port)
+{
+       const struct cnl_procmon *procmon;
+       u32 val;
+
+       procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+       val = I915_READ(ICL_PORT_COMP_DW1(port));
+       val &= ~((0xff << 16) | 0xff);
+       val |= procmon->dw1;
+       I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+       I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+       I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+                         enum port port, i915_reg_t reg, u32 mask,
+                         u32 expected_val)
+{
+       u32 val = I915_READ(reg);
+
+       if ((val & mask) != expected_val) {
+               DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+                                "current %08x mask %08x expected %08x\n",
+                                port_name(port),
+                                reg.reg, val, mask, expected_val);
+               return false;
+       }
+
+       return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+                                         enum port port)
+{
+       const struct cnl_procmon *procmon;
+       bool ret;
+
+       procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+       ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+                           (0xff << 16) | 0xff, procmon->dw1);
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+                            -1U, procmon->dw9);
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+                            -1U, procmon->dw10);
+
+       return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+       return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+               (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+       enum port port = PORT_A;
+       bool ret;
+
+       if (!cnl_combo_phy_enabled(dev_priv))
+               return false;
+
+       ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+       ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+       return ret;
+}
+
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(CHICKEN_MISC_2);
+       val &= ~CNL_COMP_PWR_DOWN;
+       I915_WRITE(CHICKEN_MISC_2, val);
+
+       /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+       cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+       val = I915_READ(CNL_PORT_COMP_DW0);
+       val |= COMP_INIT;
+       I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+       val = I915_READ(CNL_PORT_CL1CM_DW5);
+       val |= CL_POWER_DOWN_ENABLE;
+       I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       if (!cnl_combo_phy_verify_state(dev_priv))
+               DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+       val = I915_READ(CHICKEN_MISC_2);
+       val |= CNL_COMP_PWR_DOWN;
+       I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+                                 enum port port)
+{
+       return !(I915_READ(ICL_PHY_MISC(port)) &
+                ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+               (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+                                      enum port port)
+{
+       bool ret;
+
+       if (!icl_combo_phy_enabled(dev_priv, port))
+               return false;
+
+       ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+       return ret;
+}
+
+void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       for_each_combo_port(dev_priv, port) {
+               u32 val;
+
+               if (icl_combo_phy_verify_state(dev_priv, port)) {
+                       DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+                                        port_name(port));
+                       continue;
+               }
+
+               val = I915_READ(ICL_PHY_MISC(port));
+               val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+               I915_WRITE(ICL_PHY_MISC(port), val);
+
+               cnl_set_procmon_ref_values(dev_priv, port);
+
+               val = I915_READ(ICL_PORT_COMP_DW0(port));
+               val |= COMP_INIT;
+               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+               val = I915_READ(ICL_PORT_CL_DW5(port));
+               val |= CL_POWER_DOWN_ENABLE;
+               I915_WRITE(ICL_PORT_CL_DW5(port), val);
+       }
+}
+
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       for_each_combo_port_reverse(dev_priv, port) {
+               u32 val;
+
+               if (!icl_combo_phy_verify_state(dev_priv, port))
+                       DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+                                port_name(port));
+
+               val = I915_READ(ICL_PHY_MISC(port));
+               val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+               I915_WRITE(ICL_PHY_MISC(port), val);
+
+               val = I915_READ(ICL_PORT_COMP_DW0(port));
+               val &= ~COMP_INIT;
+               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+       }
+}
similarity index 54%
rename from drivers/gpu/drm/i915/intel_modes.c
rename to drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e2428050ae45c17f2fa34bd7d7ac9b5..18e370f607bcc2e50105c5854b70d61611c22709 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+int intel_connector_init(struct intel_connector *connector)
+{
+       struct intel_digital_connector_state *conn_state;
+
+       /*
+        * Allocate enough memory to hold intel_digital_connector_state.
+        * This might be a few bytes too many, but for connectors that don't
+        * need it we'll free the state and allocate a smaller one on the first
+        * successful commit anyway.
+        */
+       conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+       if (!conn_state)
+               return -ENOMEM;
+
+       __drm_atomic_helper_connector_reset(&connector->base,
+                                           &conn_state->base);
+
+       return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+       struct intel_connector *connector;
+
+       connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+       if (!connector)
+               return NULL;
+
+       if (intel_connector_init(connector) < 0) {
+               kfree(connector);
+               return NULL;
+       }
+
+       return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+       kfree(to_intel_digital_connector_state(connector->base.state));
+       kfree(connector);
+}
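
A minimal sketch of the call pattern the comment above describes (hypothetical encoder init; dev, funcs and connector_type are placeholders, and the error handling is illustrative only):

    struct intel_connector *connector;

    connector = intel_connector_alloc();
    if (!connector)
            return -ENOMEM;

    if (drm_connector_init(dev, &connector->base, &funcs, connector_type)) {
            /* drm_connector_init() failed, so the connector's .destroy
             * callback will never run: free the state and the connector
             * by hand. */
            intel_connector_free(connector);
            return -ENOMEM;
    }

    /* From here on, cleanup goes through the connector's .destroy hook
     * (intel_connector_destroy), not intel_connector_free(). */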
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       kfree(intel_connector->detect_edid);
+
+       if (!IS_ERR_OR_NULL(intel_connector->edid))
+               kfree(intel_connector->edid);
+
+       intel_panel_fini(&intel_connector->panel);
+
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       int ret;
+
+       ret = intel_backlight_device_register(intel_connector);
+       if (ret)
+               goto err;
+
+       if (i915_inject_load_failure()) {
+               ret = -EFAULT;
+               goto err_backlight;
+       }
+
+       return 0;
+
+err_backlight:
+       intel_backlight_device_unregister(intel_connector);
+err:
+       return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder)
+{
+       connector->encoder = encoder;
+       drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support
+ * only one connector and no cloning, so the encoder state determines the
+ * state of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+       enum pipe pipe = 0;
+       struct intel_encoder *encoder = connector->encoder;
+
+       return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+       if (!connector->base.state->crtc)
+               return INVALID_PIPE;
+
+       return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
 /**
  * intel_connector_update_modes - update connector from edid
  * @connector: DRM connector device to use
index 0c6bf82bb059a87e1b6ce96e1412bb0aa60f92f3..68f2fb89ece3fa259bfb2da593a7a66c68c81021 100644 (file)
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        return true;
 }
 
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
                return false;
 
        pipe_config->has_pch_encoder = true;
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
        return true;
 }
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
                return false;
 
        pipe_config->has_pch_encoder = true;
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
        /* LPT FDI RX only supports 8bpc. */
        if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
        return status;
 }
 
-static void intel_crt_destroy(struct drm_connector *connector)
-{
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_crt_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
index d48186e9ddadf8027b78e6d87ec025b5ec3df027..a516697bf57dfc8d63434b95d3f15aaaff748c21 100644 (file)
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
-#define ICL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
+#define GEN12_CSR_MAX_FW_SIZE          ICL_CSR_MAX_FW_SIZE
 
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 4)
+#define ICL_CSR_PATH                   "i915/icl_dmc_ver1_07.bin"
+#define ICL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
+#define ICL_CSR_MAX_FW_SIZE            0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
 
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
+#define CNL_CSR_PATH                   "i915/cnl_dmc_ver1_07.bin"
 #define CNL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE            GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH                   "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE            0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_PATH                   "i915/kbl_dmc_ver1_04.bin"
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE            BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_PATH                   "i915/skl_dmc_ver1_27.bin"
 #define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE            BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_PATH                   "i915/bxt_dmc_ver1_07.bin"
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
-
-
 #define BXT_CSR_MAX_FW_SIZE            0x3000
-#define GLK_CSR_MAX_FW_SIZE            0x4000
-#define ICL_CSR_MAX_FW_SIZE            0x6000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
 #define CSR_DEFAULT_FW_OFFSET          0xFFFFFFFF
 
 struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
        {'B', '0'}, {'B', '1'}, {'B', '2'}
 };
 
+static const struct stepping_info icl_stepping_info[] = {
+       {'A', '0'}, {'A', '1'}, {'A', '2'},
+       {'B', '0'}, {'B', '2'},
+       {'C', '0'}
+};
+
 static const struct stepping_info no_stepping_info = { '*', '*' };
 
 static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
        const struct stepping_info *si;
        unsigned int size;
 
-       if (IS_SKYLAKE(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               size = ARRAY_SIZE(icl_stepping_info);
+               si = icl_stepping_info;
+       } else if (IS_SKYLAKE(dev_priv)) {
                size = ARRAY_SIZE(skl_stepping_info);
                si = skl_stepping_info;
        } else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        struct intel_csr *csr = &dev_priv->csr;
        const struct stepping_info *si = intel_get_stepping_info(dev_priv);
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
-       uint32_t max_fw_size = 0;
        uint32_t i;
        uint32_t *dmc_payload;
-       uint32_t required_version;
 
        if (!fw)
                return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
                return NULL;
        }
 
-       csr->version = css_header->version;
-
-       if (csr->fw_path == i915_modparams.dmc_firmware_path) {
-               /* Bypass version check for firmware override. */
-               required_version = csr->version;
-       } else if (IS_ICELAKE(dev_priv)) {
-               required_version = ICL_CSR_VERSION_REQUIRED;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               required_version = CNL_CSR_VERSION_REQUIRED;
-       } else if (IS_GEMINILAKE(dev_priv)) {
-               required_version = GLK_CSR_VERSION_REQUIRED;
-       } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-               required_version = KBL_CSR_VERSION_REQUIRED;
-       } else if (IS_SKYLAKE(dev_priv)) {
-               required_version = SKL_CSR_VERSION_REQUIRED;
-       } else if (IS_BROXTON(dev_priv)) {
-               required_version = BXT_CSR_VERSION_REQUIRED;
-       } else {
-               MISSING_CASE(INTEL_REVID(dev_priv));
-               required_version = 0;
-       }
-
-       if (csr->version != required_version) {
+       if (csr->required_version &&
+           css_header->version != csr->required_version) {
                DRM_INFO("Refusing to load DMC firmware v%u.%u,"
                         " please use v%u.%u\n",
-                        CSR_VERSION_MAJOR(csr->version),
-                        CSR_VERSION_MINOR(csr->version),
-                        CSR_VERSION_MAJOR(required_version),
-                        CSR_VERSION_MINOR(required_version));
+                        CSR_VERSION_MAJOR(css_header->version),
+                        CSR_VERSION_MINOR(css_header->version),
+                        CSR_VERSION_MAJOR(csr->required_version),
+                        CSR_VERSION_MINOR(csr->required_version));
                return NULL;
        }
 
+       csr->version = css_header->version;
+
        readcount += sizeof(struct intel_css_header);
 
        /* Extract Package Header information */
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        /* fw_size is in dwords, so multiply by 4 to convert to bytes. */
        nbytes = dmc_header->fw_size * 4;
-       if (INTEL_GEN(dev_priv) >= 11)
-               max_fw_size = ICL_CSR_MAX_FW_SIZE;
-       else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-               max_fw_size = GLK_CSR_MAX_FW_SIZE;
-       else if (IS_GEN9(dev_priv))
-               max_fw_size = BXT_CSR_MAX_FW_SIZE;
-       else
-               MISSING_CASE(INTEL_REVID(dev_priv));
-       if (nbytes > max_fw_size) {
+       if (nbytes > csr->max_fw_size) {
                DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
                return NULL;
        }
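
The version check above compares packed major.minor values. Assuming the usual i915 encoding, with the major number in the upper 16 bits and the minor in the lower 16, the CSR_VERSION macros behave like:

    /* Packed DMC firmware version; the 16/16 split is an assumption
     * consistent with the CSR_VERSION_MAJOR/MINOR usage above.
     */
    #define CSR_VERSION(major, minor)   ((major) << 16 | (minor))
    #define CSR_VERSION_MAJOR(version)  ((version) >> 16)
    #define CSR_VERSION_MINOR(version)  ((version) & 0xffff)

    /* e.g. SKL_CSR_VERSION_REQUIRED = CSR_VERSION(1, 27) = 0x0001001b,
     * which must equal css_header->version exactly for the load to proceed.
     */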
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       if (i915_modparams.dmc_firmware_path)
-               csr->fw_path = i915_modparams.dmc_firmware_path;
-       else if (IS_ICELAKE(dev_priv))
-               csr->fw_path = I915_CSR_ICL;
-       else if (IS_CANNONLAKE(dev_priv))
-               csr->fw_path = I915_CSR_CNL;
-       else if (IS_GEMINILAKE(dev_priv))
-               csr->fw_path = I915_CSR_GLK;
-       else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-               csr->fw_path = I915_CSR_KBL;
-       else if (IS_SKYLAKE(dev_priv))
-               csr->fw_path = I915_CSR_SKL;
-       else if (IS_BROXTON(dev_priv))
-               csr->fw_path = I915_CSR_BXT;
-
        /*
-        * Obtain a runtime pm reference, until CSR is loaded,
-        * to avoid entering runtime-suspend.
+        * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+        * runtime-suspend.
+        *
+        * On error, we return with the rpm wakeref held to prevent runtime
+        * suspend, since runtime suspend *requires* a working CSR for
+        * whatever reason.
         */
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+       if (INTEL_GEN(dev_priv) >= 12) {
+               /* Allow loading fw via module parameter using the last known size */
+               csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+       } else if (IS_ICELAKE(dev_priv)) {
+               csr->fw_path = ICL_CSR_PATH;
+               csr->required_version = ICL_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+       } else if (IS_CANNONLAKE(dev_priv)) {
+               csr->fw_path = CNL_CSR_PATH;
+               csr->required_version = CNL_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+       } else if (IS_GEMINILAKE(dev_priv)) {
+               csr->fw_path = GLK_CSR_PATH;
+               csr->required_version = GLK_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+       } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+               csr->fw_path = KBL_CSR_PATH;
+               csr->required_version = KBL_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+       } else if (IS_SKYLAKE(dev_priv)) {
+               csr->fw_path = SKL_CSR_PATH;
+               csr->required_version = SKL_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+       } else if (IS_BROXTON(dev_priv)) {
+               csr->fw_path = BXT_CSR_PATH;
+               csr->required_version = BXT_CSR_VERSION_REQUIRED;
+               csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+       }
+
+       if (i915_modparams.dmc_firmware_path) {
+               if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+                       csr->fw_path = NULL;
+                       DRM_INFO("Disabling CSR firmware and runtime PM\n");
+                       return;
+               }
+
+               csr->fw_path = i915_modparams.dmc_firmware_path;
+               /* Bypass version check for firmware override. */
+               csr->required_version = 0;
+       }
+
        if (csr->fw_path == NULL) {
                DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
                WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
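
Taken together, the override parameter now has three behaviors: left unset, the per-platform table above picks the firmware; set to an empty string (e.g. booting with i915.dmc_firmware_path=), CSR loading is disabled outright, which also disables runtime PM; set to a path, that blob is loaded with the version check bypassed while the platform's last known max_fw_size limit still applies. The parameter name here is inferred from the i915_modparams.dmc_firmware_path field used above.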
index 5186cd7075f919b047fb78701cb29a99b9bb8795..ad11540ac4360574793c34843dd826d31119855f 100644 (file)
@@ -642,7 +642,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       if (IS_KBL_ULX(dev_priv)) {
+       if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
                *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
                return kbl_y_ddi_translations_dp;
        } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +658,7 @@ static const struct ddi_buf_trans *
 skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
        if (dev_priv->vbt.edp.low_vswing) {
-               if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+               if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
                        *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
                        return skl_y_ddi_translations_edp;
                } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +680,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+       if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
                *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
                return skl_y_ddi_translations_hdmi;
        } else {
@@ -1060,10 +1060,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 }
 
 static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
-                                      const struct intel_shared_dpll *pll)
+                                      const struct intel_crtc_state *crtc_state)
 {
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       int clock = crtc->config->port_clock;
+       const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+       int clock = crtc_state->port_clock;
        const enum intel_dpll_id id = pll->info->id;
 
        switch (id) {
@@ -1517,7 +1517,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
        else
                dotclock = pipe_config->port_clock;
 
-       if (pipe_config->ycbcr420)
+       if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                dotclock *= 2;
 
        if (pipe_config->pixel_multiplier)
@@ -1737,16 +1737,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (INTEL_GEN(dev_priv) <= 8)
-               hsw_ddi_clock_get(encoder, pipe_config);
-       else if (IS_GEN9_BC(dev_priv))
-               skl_ddi_clock_get(encoder, pipe_config);
-       else if (IS_GEN9_LP(dev_priv))
-               bxt_ddi_clock_get(encoder, pipe_config);
+       if (IS_ICELAKE(dev_priv))
+               icl_ddi_clock_get(encoder, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_clock_get(encoder, pipe_config);
-       else if (IS_ICELAKE(dev_priv))
-               icl_ddi_clock_get(encoder, pipe_config);
+       else if (IS_GEN9_LP(dev_priv))
+               bxt_ddi_clock_get(encoder, pipe_config);
+       else if (IS_GEN9_BC(dev_priv))
+               skl_ddi_clock_get(encoder, pipe_config);
+       else if (INTEL_GEN(dev_priv) <= 8)
+               hsw_ddi_clock_get(encoder, pipe_config);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1784,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
                break;
        }
 
+       /*
+        * Per DP 1.2 spec section 2.3.4.3, while sending YCBCR 444
+        * signals we should program the MSA MISC1/0 fields with
+        * colorspace information. The output colorspace encoding used
+        * here is BT601.
+        */
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+               temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
        I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
 }
 
@@ -1998,24 +2005,24 @@ out:
        return ret;
 }
 
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-                           enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+                                       u8 *pipe_mask, bool *is_dp_mst)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = encoder->port;
        enum pipe p;
        u32 tmp;
-       bool ret;
+       u8 mst_pipe_mask;
+
+       *pipe_mask = 0;
+       *is_dp_mst = false;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
-               return false;
-
-       ret = false;
+               return;
 
        tmp = I915_READ(DDI_BUF_CTL(port));
-
        if (!(tmp & DDI_BUF_CTL_ENABLE))
                goto out;
 
@@ -2023,44 +2030,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+               default:
+                       MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+                       /* fallthrough */
                case TRANS_DDI_EDP_INPUT_A_ON:
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
-                       *pipe = PIPE_A;
+                       *pipe_mask = BIT(PIPE_A);
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
-                       *pipe = PIPE_B;
+                       *pipe_mask = BIT(PIPE_B);
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
-                       *pipe = PIPE_C;
+                       *pipe_mask = BIT(PIPE_C);
                        break;
                }
 
-               ret = true;
-
                goto out;
        }
 
+       mst_pipe_mask = 0;
        for_each_pipe(dev_priv, p) {
-               enum transcoder cpu_transcoder = (enum transcoder) p;
+               enum transcoder cpu_transcoder = (enum transcoder)p;
 
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
-               if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
-                       if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
-                           TRANS_DDI_MODE_SELECT_DP_MST)
-                               goto out;
+               if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+                       continue;
 
-                       *pipe = p;
-                       ret = true;
+               if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+                   TRANS_DDI_MODE_SELECT_DP_MST)
+                       mst_pipe_mask |= BIT(p);
 
-                       goto out;
-               }
+               *pipe_mask |= BIT(p);
        }
 
-       DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+       if (!*pipe_mask)
+               DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+                             port_name(port));
+
+       if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+               DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+                             port_name(port), *pipe_mask);
+               *pipe_mask = BIT(ffs(*pipe_mask) - 1);
+       }
+
+       if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+               DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+                             port_name(port), *pipe_mask, mst_pipe_mask);
+       else
+               *is_dp_mst = mst_pipe_mask;
 
 out:
-       if (ret && IS_GEN9_LP(dev_priv)) {
+       if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
                tmp = I915_READ(BXT_PHY_CTL(port));
                if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2091,26 @@ out:
        }
 
        intel_display_power_put(dev_priv, encoder->power_domain);
+}
 
-       return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+                           enum pipe *pipe)
+{
+       u8 pipe_mask;
+       bool is_mst;
+
+       intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+       if (is_mst || !pipe_mask)
+               return false;
+
+       *pipe = ffs(pipe_mask) - 1;
+
+       return true;
 }
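
The refactored readout first collects every pipe driving the port into a bitmask and only then reduces it to a single pipe. ffs() returns the 1-based position of the lowest set bit, so ffs(pipe_mask) - 1 recovers the lowest pipe number:

    /* BIT(PIPE_A) == 0x1, BIT(PIPE_B) == 0x2, BIT(PIPE_C) == 0x4.
     * For a consistent non-MST port at most one bit is set, e.g.:
     *
     *   pipe_mask = BIT(PIPE_B);           // 0x2
     *   pipe = ffs(pipe_mask) - 1;         // ffs(0x2) == 2 -> PIPE_B
     *
     * hweight8() counts set bits, which is how the "multiple pipes for a
     * non DP-MST port" inconsistency is detected and clamped above.
     */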
 
 static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
        /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
         * DC states enabled at the same time, while for driver initiated AUX
@@ -2089,13 +2124,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
         * Note that PSR is enabled only on Port A even though this function
         * returns the correct domain for other ports too.
         */
-       return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
-                                             intel_dp->aux_power_domain;
+       return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+                                             intel_aux_power_domain(dig_port);
 }
 
 static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
                                       struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port;
        u64 domains;
 
@@ -2110,12 +2146,13 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
        dig_port = enc_to_dig_port(&encoder->base);
        domains = BIT_ULL(dig_port->ddi_io_power_domain);
 
-       /* AUX power is only needed for (e)DP mode, not for HDMI. */
-       if (intel_crtc_has_dp_encoder(crtc_state)) {
-               struct intel_dp *intel_dp = &dig_port->dp;
-
-               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
-       }
+       /*
+        * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+        * ports.
+        */
+       if (intel_crtc_has_dp_encoder(crtc_state) ||
+           intel_port_is_tc(dev_priv, encoder->port))
+               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
 
        return domains;
 }
@@ -2813,12 +2850,59 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
        }
 }
 
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 val;
+       enum port port = encoder->port;
+       bool clk_enabled;
+
+       /*
+        * In case of DP MST, we sanitize the primary encoder only, not the
+        * virtual ones.
+        */
+       if (encoder->type == INTEL_OUTPUT_DP_MST)
+               return;
+
+       val = I915_READ(DPCLKA_CFGCR0_ICL);
+       clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
+
+       if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+               u8 pipe_mask;
+               bool is_mst;
+
+               intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+               /*
+                * In the unlikely case that BIOS enables DP in MST mode, just
+                * warn since our MST HW readout is incomplete.
+                */
+               if (WARN_ON(is_mst))
+                       return;
+       }
+
+       if (clk_enabled == !!encoder->base.crtc)
+               return;
+
+       /*
+        * Punt for now on the case where the clock is disabled but the
+        * encoder is enabled; something else is really broken then.
+        */
+       if (WARN_ON(!clk_enabled))
+               return;
+
+       DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
+                port_name(port));
+       val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+       I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+}
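
The sanitize helper only has to repair one of the four clock/CRTC combinations; as a truth table of the logic above:

    /* clk_enabled   encoder->base.crtc   action
     * -----------   ------------------   -----------------------------------
     * false         NULL                 consistent, nothing to do
     * true          non-NULL             consistent, nothing to do
     * false         non-NULL             WARN: enabled encoder with its
     *                                    clock off, something else is broken
     * true          NULL                 BIOS left a PLL mapped to an idle
     *                                    port: gate the clock via
     *                                    DPCLKA_CFGCR0_ICL
     */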
+
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
-                                const struct intel_shared_dpll *pll)
+                                const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
        uint32_t val;
+       const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
        if (WARN_ON(!pll))
                return;
@@ -2828,7 +2912,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        if (IS_ICELAKE(dev_priv)) {
                if (!intel_port_is_combophy(dev_priv, port))
                        I915_WRITE(DDI_CLK_SEL(port),
-                                  icl_pll_to_ddi_pll_sel(encoder, pll));
+                                  icl_pll_to_ddi_pll_sel(encoder, crtc_state));
        } else if (IS_CANNONLAKE(dev_priv)) {
                /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2965,137 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
        }
 }
 
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+       u32 val;
+       int i;
+
+       if (tc_port == PORT_TC_NONE)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+               val = I915_READ(mg_regs[i]);
+               val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+                      MG_DP_MODE_CFG_TRPWR_GATING |
+                      MG_DP_MODE_CFG_CLNPWR_GATING |
+                      MG_DP_MODE_CFG_DIGPWR_GATING |
+                      MG_DP_MODE_CFG_GAONPWR_GATING;
+               I915_WRITE(mg_regs[i], val);
+       }
+
+       val = I915_READ(MG_MISC_SUS0(tc_port));
+       val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+              MG_MISC_SUS0_CFG_TR2PWR_GATING |
+              MG_MISC_SUS0_CFG_CL2PWR_GATING |
+              MG_MISC_SUS0_CFG_GAONPWR_GATING |
+              MG_MISC_SUS0_CFG_TRPWR_GATING |
+              MG_MISC_SUS0_CFG_CL1PWR_GATING |
+              MG_MISC_SUS0_CFG_DGPWR_GATING;
+       I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+       u32 val;
+       int i;
+
+       if (tc_port == PORT_TC_NONE)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+               val = I915_READ(mg_regs[i]);
+               val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+                        MG_DP_MODE_CFG_TRPWR_GATING |
+                        MG_DP_MODE_CFG_CLNPWR_GATING |
+                        MG_DP_MODE_CFG_DIGPWR_GATING |
+                        MG_DP_MODE_CFG_GAONPWR_GATING);
+               I915_WRITE(mg_regs[i], val);
+       }
+
+       val = I915_READ(MG_MISC_SUS0(tc_port));
+       val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+                MG_MISC_SUS0_CFG_TR2PWR_GATING |
+                MG_MISC_SUS0_CFG_CL2PWR_GATING |
+                MG_MISC_SUS0_CFG_GAONPWR_GATING |
+                MG_MISC_SUS0_CFG_TRPWR_GATING |
+                MG_MISC_SUS0_CFG_CL1PWR_GATING |
+                MG_MISC_SUS0_CFG_DGPWR_GATING);
+       I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       enum port port = intel_dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       u32 ln0, ln1, lane_info;
+
+       if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+               return;
+
+       ln0 = I915_READ(MG_DP_MODE(port, 0));
+       ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+       switch (intel_dig_port->tc_type) {
+       case TC_PORT_TYPEC:
+               ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+               ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+               lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+                            DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+                           DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+               switch (lane_info) {
+               case 0x1:
+               case 0x4:
+                       break;
+               case 0x2:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+                       break;
+               case 0x3:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               case 0x8:
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+                       break;
+               case 0xC:
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               case 0xF:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               default:
+                       MISSING_CASE(lane_info);
+               }
+               break;
+
+       case TC_PORT_LEGACY:
+               ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+               ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+               break;
+
+       default:
+               MISSING_CASE(intel_dig_port->tc_type);
+               return;
+       }
+
+       I915_WRITE(MG_DP_MODE(port, 0), ln0);
+       I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
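The lane_info value read from PORT_TX_DFLEXDPSP is a four-bit assignment mask; reading it as one bit per PHY lane granted to DP (an interpretation consistent with the cases handled above), the switch covers the legal contiguous grants:

    /* lane_info nibble, assuming bit n == lane n assigned to DP:
     *   0x1  lane 0 only    x1, defaults already correct
     *   0x2  lane 1 only    x1 shifted: DP_X1_MODE on ln0
     *   0x3  lanes 0-1      x2 on the lower pair
     *   0x4  lane 2 only    x1, defaults already correct
     *   0x8  lane 3 only    x1 shifted: DP_X1_MODE on ln1
     *   0xC  lanes 2-3      x2 on the upper pair
     *   0xF  all four       x4, both halves programmed
     * Anything else trips MISSING_CASE().
     */
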
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *crtc_state,
                                    const struct drm_connector_state *conn_state)
@@ -2894,19 +3109,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
        WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
 
-       intel_display_power_get(dev_priv,
-                               intel_ddi_main_link_aux_domain(intel_dp));
-
        intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
                                 crtc_state->lane_count, is_mst);
 
        intel_edp_panel_on(intel_dp);
 
-       intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+       intel_ddi_clk_select(encoder, crtc_state);
 
        intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
-       icl_program_mg_dp_mode(intel_dp);
+       icl_program_mg_dp_mode(dig_port);
        icl_disable_phy_clock_gating(dig_port);
 
        if (IS_ICELAKE(dev_priv))
@@ -2944,10 +3156,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-       intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+       intel_ddi_clk_select(encoder, crtc_state);
 
        intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
+       icl_program_mg_dp_mode(dig_port);
+       icl_disable_phy_clock_gating(dig_port);
+
        if (IS_ICELAKE(dev_priv))
                icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
                                        level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3173,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        else
                intel_prepare_hdmi_ddi_buffers(encoder, level);
 
+       icl_enable_phy_clock_gating(dig_port);
+
        if (IS_GEN9_BC(dev_priv))
                skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
        intel_ddi_enable_pipe_clock(crtc_state);
 
-       intel_dig_port->set_infoframes(&encoder->base,
+       intel_dig_port->set_infoframes(encoder,
                                       crtc_state->has_infoframe,
                                       crtc_state, conn_state);
 }
@@ -2993,10 +3210,22 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
-       else
+       } else {
+               struct intel_lspcon *lspcon =
+                               enc_to_intel_lspcon(&encoder->base);
+
                intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+               if (lspcon->active) {
+                       struct intel_digital_port *dig_port =
+                                       enc_to_dig_port(&encoder->base);
+
+                       dig_port->set_infoframes(encoder,
+                                                crtc_state->has_infoframe,
+                                                crtc_state, conn_state);
+               }
+       }
 }
 
 static void intel_disable_ddi_buf(struct intel_encoder *encoder)
@@ -3049,9 +3278,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
-
-       intel_display_power_put(dev_priv,
-                               intel_ddi_main_link_aux_domain(intel_dp));
 }
 
 static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,7 +3288,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
-       dig_port->set_infoframes(&encoder->base, false,
+       dig_port->set_infoframes(encoder, false,
                                 old_crtc_state, old_conn_state);
 
        intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3154,6 +3380,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
                intel_audio_codec_enable(encoder, crtc_state, conn_state);
 }
 
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+                              enum port port)
+{
+       static const i915_reg_t regs[] = {
+               [PORT_A] = CHICKEN_TRANS_EDP,
+               [PORT_B] = CHICKEN_TRANS_A,
+               [PORT_C] = CHICKEN_TRANS_B,
+               [PORT_D] = CHICKEN_TRANS_C,
+               [PORT_E] = CHICKEN_TRANS_A,
+       };
+
+       WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+       if (WARN_ON(port < PORT_A || port > PORT_E))
+               port = PORT_A;
+
+       return regs[port];
+}
+
 static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
@@ -3177,17 +3423,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
                 * the bits affect a specific DDI port rather than
                 * a specific transcoder.
                 */
-               static const enum transcoder port_to_transcoder[] = {
-                       [PORT_A] = TRANSCODER_EDP,
-                       [PORT_B] = TRANSCODER_A,
-                       [PORT_C] = TRANSCODER_B,
-                       [PORT_D] = TRANSCODER_C,
-                       [PORT_E] = TRANSCODER_A,
-               };
-               enum transcoder transcoder = port_to_transcoder[port];
+               i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
                u32 val;
 
-               val = I915_READ(CHICKEN_TRANS(transcoder));
+               val = I915_READ(reg);
 
                if (port == PORT_E)
                        val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3435,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
                        val |= DDI_TRAINING_OVERRIDE_ENABLE |
                                DDI_TRAINING_OVERRIDE_VALUE;
 
-               I915_WRITE(CHICKEN_TRANS(transcoder), val);
-               POSTING_READ(CHICKEN_TRANS(transcoder));
+               I915_WRITE(reg, val);
+               POSTING_READ(reg);
 
                udelay(1);
 
@@ -3208,7 +3447,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
                        val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
                                 DDI_TRAINING_OVERRIDE_VALUE);
 
-               I915_WRITE(CHICKEN_TRANS(transcoder), val);
+               I915_WRITE(reg, val);
        }
 
        /* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -3282,13 +3521,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
                intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *pipe_config,
-                                  const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+                                        const struct intel_crtc_state *pipe_config,
+                                        enum port port)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+       bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+       val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+       switch (pipe_config->lane_count) {
+       case 1:
+               val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+               DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+               break;
+       case 2:
+               val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+               DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+               break;
+       case 4:
+               val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+               break;
+       default:
+               MISSING_CASE(pipe_config->lane_count);
+       }
+       I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
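
Lane reversal flips which end of the FIA carries a narrow link, which is why the x1 and x2 cases select mirrored ML fields while x4 is unaffected. Condensed from the switch above:

    /* lane_count   normal    reversed
     *     1        ML0       ML3
     *     2        ML1_0     ML3_2
     *     4        ML3_0     ML3_0   (all lanes; reversal is moot)
     */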
+
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+                        const struct intel_crtc_state *crtc_state,
+                        const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+       enum port port = encoder->port;
+
+       if (intel_crtc_has_dp_encoder(crtc_state) ||
+           intel_port_is_tc(dev_priv, encoder->port))
+               intel_display_power_get(dev_priv,
+                                       intel_ddi_main_link_aux_domain(dig_port));
+
+       if (IS_GEN9_LP(dev_priv))
+               bxt_ddi_phy_set_lane_optim_mask(encoder,
+                                               crtc_state->lane_lat_optim_mask);
+
+       /*
+        * Program the lane count for static/dynamic connections on Type-C ports.
+        * Skip this step for TBT.
+        */
+       if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+           dig_port->tc_type == TC_PORT_TBT)
+               return;
+
+       intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state)
 {
-       uint8_t mask = pipe_config->lane_lat_optim_mask;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
-       bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+       if (intel_crtc_has_dp_encoder(crtc_state) ||
+           intel_port_is_tc(dev_priv, encoder->port))
+               intel_display_power_put(dev_priv,
+                                       intel_ddi_main_link_aux_domain(dig_port));
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3353,10 +3655,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
                                         struct intel_crtc_state *crtc_state)
 {
-       if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
-               crtc_state->min_voltage_level = 2;
-       else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+       if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 1;
+       else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+               crtc_state->min_voltage_level = 2;
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3708,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                pipe_config->has_hdmi_sink = true;
                intel_dig_port = enc_to_dig_port(&encoder->base);
 
-               if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+               if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
                        pipe_config->has_infoframe = true;
 
                if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4069,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        bool init_hdmi, init_dp, init_lspcon = false;
+       enum pipe pipe;
 
 
        init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4108,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        intel_encoder->compute_output_type = intel_ddi_compute_output_type;
        intel_encoder->compute_config = intel_ddi_compute_config;
        intel_encoder->enable = intel_enable_ddi;
-       if (IS_GEN9_LP(dev_priv))
-               intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+       intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+       intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
        intel_encoder->pre_enable = intel_ddi_pre_enable;
        intel_encoder->disable = intel_disable_ddi;
        intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4120,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        intel_encoder->type = INTEL_OUTPUT_DDI;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        intel_encoder->port = port;
-       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        intel_encoder->cloneable = 0;
+       for_each_pipe(dev_priv, pipe)
+               intel_encoder->crtc_mask |= BIT(pipe);
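
Deriving crtc_mask from for_each_pipe() keeps the mask in step with the actual pipe count instead of hardcoding three pipes:

    /* 3 pipes: crtc_mask = BIT(0) | BIT(1) | BIT(2) = 0x7 (the old constant)
     * 4 pipes: crtc_mask = 0xf, with no further change needed here
     */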
 
        if (INTEL_GEN(dev_priv) >= 11)
                intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4132,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                        (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
        intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
        intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
        switch (port) {
        case PORT_A:
@@ -3858,8 +4163,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                MISSING_CASE(port);
        }
 
-       intel_infoframe_init(intel_dig_port);
-
        if (init_dp) {
                if (!intel_ddi_init_dp_connector(intel_dig_port))
                        goto err;
@@ -3888,6 +4191,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                                port_name(port));
        }
 
+       intel_infoframe_init(intel_dig_port);
        return;
 
 err:
index 0ef0c6448d53a835fbdf5319a8010c64d613bd0f..ceecb5bd5226ac1fd0357f9e5b96a79d574d6233 100644 (file)
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                        u8 eu_disabled_mask;
                        u32 n_disabled;
 
-                       if (!(sseu->subslice_mask[ss] & BIT(ss)))
+                       if (!(sseu->subslice_mask[s] & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
@@ -744,27 +744,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
        if (INTEL_GEN(dev_priv) >= 10) {
                for_each_pipe(dev_priv, pipe)
                        info->num_scalers[pipe] = 2;
-       } else if (INTEL_GEN(dev_priv) == 9) {
+       } else if (IS_GEN9(dev_priv)) {
                info->num_scalers[PIPE_A] = 2;
                info->num_scalers[PIPE_B] = 2;
                info->num_scalers[PIPE_C] = 1;
        }
 
-       BUILD_BUG_ON(I915_NUM_ENGINES >
-                    sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+       BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
 
-       /*
-        * Skylake and Broxton currently don't expose the topmost plane as its
-        * use is exclusive with the legacy cursor and we only want to expose
-        * one of those, not both. Until we can safely expose the topmost plane
-        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
-        * we don't expose the topmost plane at all to prevent ABI breakage
-        * down the line.
-        */
-       if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+       if (IS_GEN11(dev_priv))
+               for_each_pipe(dev_priv, pipe)
+                       info->num_sprites[pipe] = 6;
+       else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 3;
        else if (IS_BROXTON(dev_priv)) {
+               /*
+                * Skylake and Broxton currently don't expose the topmost
+                * plane because its use is mutually exclusive with the
+                * legacy cursor, and we only want to expose one of those,
+                * not both. Until we can safely expose the topmost plane as
+                * a DRM_PLANE_TYPE_CURSOR with all its features
+                * exposed/supported, we don't expose it at all, to prevent
+                * ABI breakage down the line.
+                */
+
                info->num_sprites[PIPE_A] = 2;
                info->num_sprites[PIPE_B] = 2;
                info->num_sprites[PIPE_C] = 1;
@@ -844,13 +847,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
                cherryview_sseu_info_init(dev_priv);
        else if (IS_BROADWELL(dev_priv))
                broadwell_sseu_info_init(dev_priv);
-       else if (INTEL_GEN(dev_priv) == 9)
+       else if (IS_GEN9(dev_priv))
                gen9_sseu_info_init(dev_priv);
-       else if (INTEL_GEN(dev_priv) == 10)
+       else if (IS_GEN10(dev_priv))
                gen10_sseu_info_init(dev_priv);
        else if (INTEL_GEN(dev_priv) >= 11)
                gen11_sseu_info_init(dev_priv);
 
+       if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+               DRM_INFO("Disabling ppGTT for VT-d support\n");
+               info->ppgtt = INTEL_PPGTT_NONE;
+       }
+
        /* Initialize command stream timestamp frequency */
        info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
 }
@@ -872,40 +880,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
 void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       u8 vdbox_disable, vebox_disable;
        u32 media_fuse;
-       int i;
+       unsigned int i;
 
        if (INTEL_GEN(dev_priv) < 11)
                return;
 
-       media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+       media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
 
-       vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-       vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-                       GEN11_GT_VEBOX_DISABLE_SHIFT;
+       info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+       info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+                            GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-       DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+       DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
        for (i = 0; i < I915_MAX_VCS; i++) {
                if (!HAS_ENGINE(dev_priv, _VCS(i)))
                        continue;
 
-               if (!(BIT(i) & vdbox_disable))
-                       continue;
-
-               info->ring_mask &= ~ENGINE_MASK(_VCS(i));
-               DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+               if (!(BIT(i) & info->vdbox_enable)) {
+                       info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+                       DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+               }
        }
 
-       DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+       DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
        for (i = 0; i < I915_MAX_VECS; i++) {
                if (!HAS_ENGINE(dev_priv, _VECS(i)))
                        continue;
 
-               if (!(BIT(i) & vebox_disable))
-                       continue;
-
-               info->ring_mask &= ~ENGINE_MASK(_VECS(i));
-               DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+               if (!(BIT(i) & info->vebox_enable)) {
+                       info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+                       DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+               }
        }
 }
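
Complementing the fuse register once up front converts the hardware's disable semantics into the enable semantics the driver stores, so each loop prunes an engine when its enable bit is clear. In miniature, for a four-engine example:

    /* GEN11_GT_VEBOX_VDBOX_DISABLE sets a bit per fused-OFF engine:
     *   raw fuse bits          = 0b0101   (VCS0 and VCS2 fused off)
     *   ~raw, masked to 4 bits = 0b1010   (VCS1 and VCS3 enabled)
     * A clear bit in vdbox_enable/vebox_enable drops the engine from
     * ring_mask.
     */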
index 6eecd64734d51808af700da8f7a41fa399fc0df8..88f97210dc49a09272bc424d532a5b9837720678 100644 (file)
@@ -25,6 +25,8 @@
 #ifndef _INTEL_DEVICE_INFO_H_
 #define _INTEL_DEVICE_INFO_H_
 
+#include <uapi/drm/i915_drm.h>
+
 #include "intel_display.h"
 
 struct drm_printer;
@@ -74,21 +76,25 @@ enum intel_platform {
        INTEL_MAX_PLATFORMS
 };
 
+enum intel_ppgtt {
+       INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
+       INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
+       INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
+       INTEL_PPGTT_FULL_4LVL,
+};
+
 #define DEV_INFO_FOR_EACH_FLAG(func) \
        func(is_mobile); \
        func(is_lp); \
        func(is_alpha_support); \
        /* Keep has_* in alphabetical order */ \
        func(has_64bit_reloc); \
-       func(has_aliasing_ppgtt); \
        func(has_csr); \
        func(has_ddi); \
        func(has_dp_mst); \
        func(has_reset_engine); \
        func(has_fbc); \
        func(has_fpga_dbg); \
-       func(has_full_ppgtt); \
-       func(has_full_48bit_ppgtt); \
        func(has_gmch_display); \
        func(has_guc); \
        func(has_guc_ct); \
@@ -118,7 +124,7 @@ enum intel_platform {
 
 struct sseu_dev_info {
        u8 slice_mask;
-       u8 subslice_mask[GEN_MAX_SUBSLICES];
+       u8 subslice_mask[GEN_MAX_SLICES];
        u16 eu_total;
        u8 eu_per_subslice;
        u8 min_eu_in_pool;
@@ -154,6 +160,7 @@ struct intel_device_info {
        enum intel_platform platform;
        u32 platform_mask;
 
+       enum intel_ppgtt ppgtt;
        unsigned int page_sizes; /* page sizes supported by the HW */
 
        u32 display_mmio_offset;
@@ -170,7 +177,6 @@ struct intel_device_info {
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
-       int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];
 
        /* Slice/subslice/EU info */
@@ -178,6 +184,10 @@ struct intel_device_info {
 
        u32 cs_timestamp_frequency_khz;
 
+       /* Enabled (not fused off) media engine bitmasks. */
+       u8 vdbox_enable;
+       u8 vebox_enable;
+
        struct color_luts {
                u16 degamma_lut_size;
                u16 gamma_lut_size;
index 9741cc419e1b2bc1f5eb4771ae75468f0099289d..812ec5ae5c7b4617d2ce2f92fb8726f300c76c2e 100644 (file)
@@ -24,7 +24,6 @@
  *     Eric Anholt <eric@anholt.net>
  */
 
-#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static const uint32_t skl_primary_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-};
-
-static const uint32_t skl_pri_planar_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_NV12,
-};
-
-static const uint64_t skl_format_modifiers_noccs[] = {
-       I915_FORMAT_MOD_Yf_TILED,
-       I915_FORMAT_MOD_Y_TILED,
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static const uint64_t skl_format_modifiers_ccs[] = {
-       I915_FORMAT_MOD_Yf_TILED_CCS,
-       I915_FORMAT_MOD_Y_TILED_CCS,
-       I915_FORMAT_MOD_Yf_TILED,
-       I915_FORMAT_MOD_Y_TILED,
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
 /* Cursor formats */
 static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-                                        struct intel_link_m_n *m_n,
-                                        struct intel_link_m_n *m2_n2);
-static void ironlake_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipemisc(struct drm_crtc *crtc);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n,
+                                        const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
 static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(struct intel_crtc *crtc);
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
-static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -505,24 +455,9 @@ static const struct intel_limit intel_limits_bxt = {
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
-static void
-skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
-       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-               return;
-
-       if (enable)
-               I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
-       else
-               I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
-}
-
 static void
 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
 {
-       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-               return;
-
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));
 
+       /* PCH SDVOB is multiplexed with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
        }
 }
 
-static void i9xx_disable_pll(struct intel_crtc *crtc)
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
 
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
-           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                     I915_READ(dpll_reg) & port_mask, expected_mask);
 }
 
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
-                                          enum pipe pipe)
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 {
-       struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
-                                                               pipe);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        uint32_t val, pipeconf_val;
 
        /* Make sure PCH DPLL is enabled */
-       assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
+       assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
 
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
-               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
                if (HAS_PCH_IBX(dev_priv) &&
-                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
        return new_offset;
 }
 
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+       return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
 static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
 
        WARN_ON(new_offset > old_offset);
 
-       if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+       if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;
 
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
                                        unsigned int rotation,
                                        u32 alignment)
 {
-       uint64_t fb_modifier = fb->modifier;
        unsigned int cpp = fb->format->cpp[color_plane];
        u32 offset, offset_aligned;
 
        if (alignment)
                alignment--;
 
-       if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
+       if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;
 
@@ -2574,7 +2515,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                                                      tile_size);
                offset /= tile_size;
 
-               if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+               if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;
@@ -2788,10 +2729,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
                crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
        else
                crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-
-       DRM_DEBUG_KMS("%s active planes 0x%x\n",
-                     crtc_state->base.crtc->name,
-                     crtc_state->active_planes);
 }
 
 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2756,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);
 
+       DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+                     plane->base.base.id, plane->base.name,
+                     crtc->base.base.id, crtc->base.name);
+
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
 
@@ -2890,6 +2831,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       intel_state->base.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
@@ -3098,28 +3040,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
        return 0;
 }
 
-static int
-skl_check_nv12_surface(struct intel_plane_state *plane_state)
-{
-       /* Display WA #1106 */
-       if (plane_state->base.rotation !=
-           (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
-           plane_state->base.rotation != DRM_MODE_ROTATE_270)
-               return 0;
-
-       /*
-        * src coordinates are rotated here.
-        * We check height but report it as width
-        */
-       if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
-               DRM_DEBUG_KMS("src width must be multiple "
-                             "of 4 for rotated NV12\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 {
        const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3198,9 +3118,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
         * the main surface setup depends on it.
         */
        if (fb->format->format == DRM_FORMAT_NV12) {
-               ret = skl_check_nv12_surface(plane_state);
-               if (ret)
-                       return ret;
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
@@ -3448,7 +3365,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
        }
-       POSTING_READ_FW(reg);
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -3467,7 +3383,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
                I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
        else
                I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
-       POSTING_READ_FW(DSPCNTR(i9xx_plane));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -3527,13 +3442,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
 {
-       struct intel_crtc_scaler_state *scaler_state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
        int i;
 
-       scaler_state = &intel_crtc->config->scaler_state;
-
        /* loop through and disable scalers that aren't in use */
        for (i = 0; i < intel_crtc->num_scalers; i++) {
                if (!scaler_state->scalers[i].in_use)
@@ -3597,29 +3512,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
        return 0;
 }
 
-/*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
-static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-       switch (pixel_format) {
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_ARGB8888:
+       if (!plane_state->base.fb->format->has_alpha)
+               return PLANE_CTL_ALPHA_DISABLE;
+
+       switch (plane_state->base.pixel_blend_mode) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               return PLANE_CTL_ALPHA_DISABLE;
+       case DRM_MODE_BLEND_PREMULTI:
                return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+       case DRM_MODE_BLEND_COVERAGE:
+               return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
        default:
+               MISSING_CASE(plane_state->base.pixel_blend_mode);
                return PLANE_CTL_ALPHA_DISABLE;
        }
 }
 
-static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-       switch (pixel_format) {
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_ARGB8888:
+       if (!plane_state->base.fb->format->has_alpha)
+               return PLANE_COLOR_ALPHA_DISABLE;
+
+       switch (plane_state->base.pixel_blend_mode) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               return PLANE_COLOR_ALPHA_DISABLE;
+       case DRM_MODE_BLEND_PREMULTI:
                return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+       case DRM_MODE_BLEND_COVERAGE:
+               return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
        default:
+               MISSING_CASE(plane_state->base.pixel_blend_mode);
                return PLANE_COLOR_ALPHA_DISABLE;
        }
 }
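
For reference, a minimal userspace sketch of where the blend mode consumed by skl_plane_ctl_alpha()/glk_plane_color_ctl_alpha() comes from: the KMS "pixel blend mode" plane property ("None", "Pre-multiplied", "Coverage"). This is an illustrative libdrm snippet, not part of the patch; plane_id, prop_id and blend_enum_value are assumed to have been discovered at runtime via drmModeObjectGetProperties().

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_pixel_blend_mode(int fd, uint32_t plane_id,
                                uint32_t prop_id, uint64_t blend_enum_value)
{
        drmModeAtomicReqPtr req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -ENOMEM;

        /* value is the enum index advertised by the property itself */
        ret = drmModeAtomicAddProperty(req, plane_id, prop_id,
                                       blend_enum_value);
        if (ret >= 0) {
                /* validate first, then commit for real */
                ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
                if (!ret)
                        ret = drmModeAtomicCommit(fd, req, 0, NULL);
        }

        drmModeAtomicFree(req);
        return ret;
}
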
@@ -3696,7 +3620,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
        plane_ctl = PLANE_CTL_ENABLE;
 
        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
-               plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
+               plane_ctl |= skl_plane_ctl_alpha(plane_state);
                plane_ctl |=
                        PLANE_CTL_PIPE_GAMMA_ENABLE |
                        PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3731,6 +3655,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        u32 plane_color_ctl = 0;
 
        if (INTEL_GEN(dev_priv) < 11) {
@@ -3738,9 +3663,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
        }
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
-       plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+       plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
 
-       if (fb->format->is_yuv) {
+       if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
@@ -3748,6 +3673,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 
                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+       } else if (fb->format->is_yuv) {
+               plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
        }
 
        return plane_color_ctl;
@@ -3932,15 +3859,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
 
        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
-               skl_detach_scalers(crtc);
+               skl_detach_scalers(new_crtc_state);
 
                if (new_crtc_state->pch_pfit.enabled)
-                       skylake_pfit_enable(crtc);
+                       skylake_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                if (new_crtc_state->pch_pfit.enabled)
-                       ironlake_pfit_enable(crtc);
+                       ironlake_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
-                       ironlake_pfit_disable(crtc, true);
+                       ironlake_pfit_disable(old_crtc_state);
        }
 }
 
@@ -4339,10 +4266,10 @@ train_done:
        DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
@@ -4351,7 +4278,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
-       temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
@@ -4500,10 +4427,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 }
 
 /* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct intel_crtc *crtc)
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int clock = crtc->config->base.adjusted_mode.crtc_clock;
+       int clock = crtc_state->base.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;
 
@@ -4614,12 +4542,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
                                 desired_divisor << auxdiv);
 }
 
-static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                                enum pipe pch_transcoder)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
                   I915_READ(HTOTAL(cpu_transcoder)));
@@ -4638,9 +4566,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
 }
 
-static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t temp;
 
        temp = I915_READ(SOUTH_CHICKEN1);
@@ -4659,22 +4586,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
        POSTING_READ(SOUTH_CHICKEN1);
 }
 
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = intel_crtc->base.dev;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       switch (intel_crtc->pipe) {
+       switch (crtc->pipe) {
        case PIPE_A:
                break;
        case PIPE_B:
-               if (intel_crtc->config->fdi_lanes > 2)
-                       cpt_set_fdi_bc_bifurcation(dev, false);
+               if (crtc_state->fdi_lanes > 2)
+                       cpt_set_fdi_bc_bifurcation(dev_priv, false);
                else
-                       cpt_set_fdi_bc_bifurcation(dev, true);
+                       cpt_set_fdi_bc_bifurcation(dev_priv, true);
 
                break;
        case PIPE_C:
-               cpt_set_fdi_bc_bifurcation(dev, true);
+               cpt_set_fdi_bc_bifurcation(dev_priv, true);
 
                break;
        default:
@@ -4731,7 +4659,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
        assert_pch_transcoder_disabled(dev_priv, pipe);
 
        if (IS_IVYBRIDGE(dev_priv))
-               ivybridge_update_fdi_bc_bifurcation(crtc);
+               ivybridge_update_fdi_bc_bifurcation(crtc_state);
 
        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
@@ -4764,11 +4692,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
-       intel_enable_shared_dpll(crtc);
+       intel_enable_shared_dpll(crtc_state);
 
        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
-       ironlake_pch_transcoder_set_timings(crtc, pipe);
+       ironlake_pch_transcoder_set_timings(crtc_state, pipe);
 
        intel_fdi_normal_train(crtc);
 
@@ -4800,7 +4728,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
                I915_WRITE(reg, temp);
        }
 
-       ironlake_enable_pch_transcoder(dev_priv, pipe);
+       ironlake_enable_pch_transcoder(crtc_state);
 }
 
 static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4812,10 +4740,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
 
        assert_pch_transcoder_disabled(dev_priv, PIPE_A);
 
-       lpt_program_iclkip(crtc);
+       lpt_program_iclkip(crtc_state);
 
        /* Set transcoder timing. */
-       ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
+       ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
 
        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
@@ -4850,8 +4778,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  * chroma samples for both of the luma samples, and thus we don't
  * actually get the expected MPEG2 chroma siting convention :(
  * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | |     1.5 (initial phase)
+ * | |     |
+ * v v     v
+ * | s | s | s | s |
+ * |       d       |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | |     0.0
+ * | |     |
+ * v v     v
+ * |       s       |
+ * | d | d | d | d |
  */
-u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
 {
        int phase = -0x8000;
        u16 trip = 0;
@@ -4859,6 +4810,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
        if (chroma_cosited)
                phase += (sub - 1) * 0x8000 / sub;
 
+       phase += scale / (2 * sub);
+
+       /*
+        * Hardware initial phase is limited to [-0.5:1.5].
+        * Since the max hardware scale factor is 3.0, we
+        * should never actually exceed 1.0 here.
+        */
+       WARN_ON(phase < -0x8000 || phase > 0x18000);
+
        if (phase < 0)
                phase = 0x10000 + phase;
        else
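
To sanity-check the new scale-dependent term against the diagrams above: phase values are s16.16 fixed point (0x10000 == 1.0 pixel) and scale is the src/dst ratio that the callers below compute. A standalone sketch of the initial-phase arithmetic only (register packing and the [-0.5:1.5] clamp are omitted):

#include <assert.h>

static int initial_phase(int sub, int scale, int chroma_cosited)
{
        int phase = -0x8000;                    /* start at -0.5 */

        if (chroma_cosited)
                phase += (sub - 1) * 0x8000 / sub;

        /* shift by half an output sample step, in source units */
        return phase + scale / (2 * sub);
}

int main(void)
{
        assert(initial_phase(1, 0x10000, 0) == 0);          /* 1:1 -> 0.0    */
        assert(initial_phase(1, 0x04000, 0) == -0x6000);    /* 1:4 -> -0.375 */
        assert(initial_phase(1, 0x40000, 0) == 0x18000);    /* 4:1 -> 1.5    */
        return 0;
}
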
@@ -4871,8 +4831,7 @@ static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
-                 bool plane_scaler_check,
-                 uint32_t pixel_format)
+                 const struct drm_format_info *format, bool need_scaler)
 {
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
@@ -4881,21 +4840,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
-       int need_scaling;
 
        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
-       need_scaling = src_w != dst_w || src_h != dst_h;
-
-       if (plane_scaler_check)
-               if (pixel_format == DRM_FORMAT_NV12)
-                       need_scaling = true;
-
-       if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
-               need_scaling = true;
+       if (src_w != dst_w || src_h != dst_h)
+               need_scaler = true;
 
        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4904,7 +4856,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
-           need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+           need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }
@@ -4919,7 +4871,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
-       if (force_detach || !need_scaling) {
+       if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4933,7 +4885,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                return 0;
        }
 
-       if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+       if (format && format->format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
@@ -4976,12 +4928,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 int skl_update_scaler_crtc(struct intel_crtc_state *state)
 {
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+       bool need_scaler = false;
+
+       if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+               need_scaler = true;
 
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
                                 &state->scaler_state.scaler_id,
                                 state->pipe_src_w, state->pipe_src_h,
                                 adjusted_mode->crtc_hdisplay,
-                                adjusted_mode->crtc_vdisplay, false, 0);
+                                adjusted_mode->crtc_vdisplay, NULL, need_scaler);
 }
 
 /**
@@ -4996,13 +4952,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
 {
-
        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->base.plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;
-
        bool force_detach = !fb || !plane_state->base.visible;
+       bool need_scaler = false;
+
+       /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+       if (!icl_is_hdr_plane(intel_plane) &&
+           fb && fb->format->format == DRM_FORMAT_NV12)
+               need_scaler = true;
 
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
@@ -5011,7 +4971,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                drm_rect_height(&plane_state->base.src) >> 16,
                                drm_rect_width(&plane_state->base.dst),
                                drm_rect_height(&plane_state->base.dst),
-                               fb ? true : false, fb ? fb->format->format : 0);
+                               fb ? fb->format : NULL, need_scaler);
 
        if (ret || plane_state->scaler_id < 0)
                return ret;
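
The plane path now decides up front whether a scaler is needed and passes that in, instead of the old plane_scaler_check/pixel_format pair. A condensed sketch of the resulting decision (names mirror the diff; the icl_is_hdr_plane() result is treated as a plain boolean input, and the NV12 minimum-size check is left out):

#include <stdbool.h>
#include <stdint.h>
#include <drm_fourcc.h> /* libdrm header providing DRM_FORMAT_NV12 */

static bool plane_needs_scaler(bool is_hdr_plane, uint32_t format,
                               int src_w, int src_h, int dst_w, int dst_h)
{
        /* pre-gen11 and SDR planes route planar NV12 through a scaler */
        if (!is_hdr_plane && format == DRM_FORMAT_NV12)
                return true;

        /* any size mismatch still needs scaling, as before */
        return src_w != dst_w || src_h != dst_h;
}
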
@@ -5057,23 +5017,30 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
                skl_detach_scaler(crtc, i);
 }
 
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       struct intel_crtc_scaler_state *scaler_state =
-               &crtc->config->scaler_state;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       const struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
 
-       if (crtc->config->pch_pfit.enabled) {
+       if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
+               int pfit_w, pfit_h, hscale, vscale;
                int id;
 
-               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+               if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;
 
-               uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-               uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+               pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+               pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+
+               hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+               vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -5082,18 +5049,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
-               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
 }
 
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int pipe = crtc->pipe;
 
-       if (crtc->config->pch_pfit.enabled) {
+       if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
@@ -5103,8 +5070,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
-               I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
-               I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+               I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+               I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
        }
 }
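
The pch_pfit.size register value packs the fitter window as (width << 16) | height, which is what the new pfit_w/pfit_h/hscale/vscale derivation above unpacks before calling skl_scaler_calc_phase(). A worked example with hypothetical numbers, assuming a 1920x1080 pipe fitted into a 1600x900 window:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pfit_size = (1600u << 16) | 900;       /* (w << 16) | h */
        int pfit_w = (pfit_size >> 16) & 0xFFFF;        /* 1600 */
        int pfit_h = pfit_size & 0xFFFF;                /* 900 */

        /* 16.16 src/dst ratios, as in skylake_pfit_enable() */
        int hscale = (1920 << 16) / pfit_w;     /* 0x13333, i.e. ~1.2 */
        int vscale = (1080 << 16) / pfit_h;     /* 0x13333, i.e. ~1.2 */

        printf("hscale=0x%x vscale=0x%x\n", hscale, vscale);
        return 0;
}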
 
@@ -5299,11 +5266,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
        if (!crtc_state->nv12_planes)
                return false;
 
-       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-               return false;
-
-       if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
-           IS_CANNONLAKE(dev_priv))
+       /* WA Display #0827: Gen9:all */
+       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
                return true;
 
        return false;
@@ -5346,7 +5310,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
-               skl_wa_528(dev_priv, crtc->pipe, false);
        }
 }
 
@@ -5386,7 +5349,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, true);
-               skl_wa_528(dev_priv, crtc->pipe, true);
        }
 
        /*
@@ -5409,7 +5371,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
-       if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
+       if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+           old_crtc_state->base.active)
                intel_wait_for_vblank(dev_priv, crtc->pipe);
 
        /*
@@ -5440,24 +5403,23 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                intel_update_watermarks(crtc);
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
 {
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *p;
-       int pipe = intel_crtc->pipe;
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_plane *plane;
+       unsigned fb_bits = 0;
 
-       intel_crtc_dpms_overlay_disable(intel_crtc);
+       intel_crtc_dpms_overlay_disable(crtc);
 
-       drm_for_each_plane_mask(p, dev, plane_mask)
-               to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               if (plane_mask & BIT(plane->id)) {
+                       plane->disable_plane(plane, crtc);
 
-       /*
-        * FIXME: Once we grow proper nuclear flip support out of this we need
-        * to compute the mask of flip planes precisely. For the time being
-        * consider this a flip to a NULL plane.
-        */
-       intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+                       fb_bits |= plane->frontbuffer_bit;
+               }
+       }
+
+       intel_frontbuffer_flip(to_i915(dev), fb_bits);
 }
 
 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5515,7 +5477,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
                if (conn_state->crtc != crtc)
                        continue;
 
-               encoder->enable(encoder, crtc_state, conn_state);
+               if (encoder->enable)
+                       encoder->enable(encoder, crtc_state, conn_state);
                intel_opregion_notify_encoder(encoder, true);
        }
 }
@@ -5536,7 +5499,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
                        continue;
 
                intel_opregion_notify_encoder(encoder, false);
-               encoder->disable(encoder, old_crtc_state, old_conn_state);
+               if (encoder->disable)
+                       encoder->disable(encoder, old_crtc_state, old_conn_state);
        }
 }
 
@@ -5607,37 +5571,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
-       if (intel_crtc->config->has_pch_encoder)
-               intel_prepare_shared_dpll(intel_crtc);
+       if (pipe_config->has_pch_encoder)
+               intel_prepare_shared_dpll(pipe_config);
 
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
 
-       intel_set_pipe_timings(intel_crtc);
-       intel_set_pipe_src_size(intel_crtc);
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
 
-       if (intel_crtc->config->has_pch_encoder) {
-               intel_cpu_transcoder_set_m_n(intel_crtc,
-                                    &intel_crtc->config->fdi_m_n, NULL);
+       if (pipe_config->has_pch_encoder) {
+               intel_cpu_transcoder_set_m_n(pipe_config,
+                                            &pipe_config->fdi_m_n, NULL);
        }
 
-       ironlake_set_pipeconf(crtc);
+       ironlake_set_pipeconf(pipe_config);
 
        intel_crtc->active = true;
 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-       if (intel_crtc->config->has_pch_encoder) {
+       if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
-               ironlake_fdi_pll_enable(intel_crtc);
+               ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }
 
-       ironlake_pfit_enable(intel_crtc);
+       ironlake_pfit_enable(pipe_config);
 
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5646,10 +5610,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_color_load_luts(&pipe_config->base);
 
        if (dev_priv->display.initial_watermarks != NULL)
-               dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
+               dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
        intel_enable_pipe(pipe_config);
 
-       if (intel_crtc->config->has_pch_encoder)
+       if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(old_intel_state, pipe_config);
 
        assert_vblank_disabled(crtc);
@@ -5666,7 +5630,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
-       if (intel_crtc->config->has_pch_encoder) {
+       if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
@@ -5700,10 +5664,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
        enum pipe pipe = crtc->pipe;
        uint32_t val;
 
-       val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
-
-       /* Program B credit equally to all pipes */
-       val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+       val = MBUS_DBOX_A_CREDIT(2);
+       val |= MBUS_DBOX_BW_CREDIT(1);
+       val |= MBUS_DBOX_B_CREDIT(8);
 
        I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
 }
@@ -5715,7 +5678,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        bool psl_clkgate_wa;
@@ -5726,37 +5689,37 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
-       if (intel_crtc->config->shared_dpll)
-               intel_enable_shared_dpll(intel_crtc);
+       if (pipe_config->shared_dpll)
+               intel_enable_shared_dpll(pipe_config);
 
        if (INTEL_GEN(dev_priv) >= 11)
                icl_map_plls_to_ports(crtc, pipe_config, old_state);
 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
 
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_set_pipe_timings(intel_crtc);
+               intel_set_pipe_timings(pipe_config);
 
-       intel_set_pipe_src_size(intel_crtc);
+       intel_set_pipe_src_size(pipe_config);
 
        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
-                          intel_crtc->config->pixel_multiplier - 1);
+                          pipe_config->pixel_multiplier - 1);
        }
 
-       if (intel_crtc->config->has_pch_encoder) {
-               intel_cpu_transcoder_set_m_n(intel_crtc,
-                                    &intel_crtc->config->fdi_m_n, NULL);
+       if (pipe_config->has_pch_encoder) {
+               intel_cpu_transcoder_set_m_n(pipe_config,
+                                            &pipe_config->fdi_m_n, NULL);
        }
 
        if (!transcoder_is_dsi(cpu_transcoder))
-               haswell_set_pipeconf(crtc);
+               haswell_set_pipeconf(pipe_config);
 
-       haswell_set_pipemisc(crtc);
+       haswell_set_pipemisc(pipe_config);
 
        intel_color_set_csc(&pipe_config->base);
 
@@ -5764,14 +5727,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-                        intel_crtc->config->pch_pfit.enabled;
+                        pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
 
        if (INTEL_GEN(dev_priv) >= 9)
-               skylake_pfit_enable(intel_crtc);
+               skylake_pfit_enable(pipe_config);
        else
-               ironlake_pfit_enable(intel_crtc);
+               ironlake_pfit_enable(pipe_config);
 
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5804,10 +5767,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);
 
-       if (intel_crtc->config->has_pch_encoder)
+       if (pipe_config->has_pch_encoder)
                lpt_pch_enable(old_intel_state, pipe_config);
 
-       if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
+       if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);
 
        assert_vblank_disabled(crtc);
@@ -5829,15 +5792,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        }
 }
 
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
-       if (force || crtc->config->pch_pfit.enabled) {
+       if (old_crtc_state->pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5868,14 +5831,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
        intel_disable_pipe(old_crtc_state);
 
-       ironlake_pfit_disable(intel_crtc, false);
+       ironlake_pfit_disable(old_crtc_state);
 
-       if (intel_crtc->config->has_pch_encoder)
+       if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);
 
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-       if (intel_crtc->config->has_pch_encoder) {
+       if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);
 
                if (HAS_PCH_CPT(dev_priv)) {
@@ -5929,21 +5892,22 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
-               ironlake_pfit_disable(intel_crtc, false);
+               ironlake_pfit_disable(old_crtc_state);
 
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (INTEL_GEN(dev_priv) >= 11)
                icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
+
+       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 }
 
-static void i9xx_pfit_enable(struct intel_crtc *crtc)
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *pipe_config = crtc->config;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       if (!pipe_config->gmch_pfit.control)
+       if (!crtc_state->gmch_pfit.control)
                return;
 
        /*
@@ -5953,8 +5917,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);
 
-       I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
-       I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+       I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+       I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
 
        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
@@ -6009,6 +5973,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
        }
 }
 
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+       switch (dig_port->aux_ch) {
+       case AUX_CH_A:
+               return POWER_DOMAIN_AUX_A;
+       case AUX_CH_B:
+               return POWER_DOMAIN_AUX_B;
+       case AUX_CH_C:
+               return POWER_DOMAIN_AUX_C;
+       case AUX_CH_D:
+               return POWER_DOMAIN_AUX_D;
+       case AUX_CH_E:
+               return POWER_DOMAIN_AUX_E;
+       case AUX_CH_F:
+               return POWER_DOMAIN_AUX_F;
+       default:
+               MISSING_CASE(dig_port->aux_ch);
+               return POWER_DOMAIN_AUX_A;
+       }
+}
+
 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
@@ -6088,20 +6074,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
        if (WARN_ON(intel_crtc->active))
                return;
 
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
 
-       intel_set_pipe_timings(intel_crtc);
-       intel_set_pipe_src_size(intel_crtc);
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
 
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-               struct drm_i915_private *dev_priv = to_i915(dev);
-
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }
 
-       i9xx_set_pipeconf(intel_crtc);
+       i9xx_set_pipeconf(pipe_config);
 
        intel_color_set_csc(&pipe_config->base);
 
@@ -6112,16 +6096,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (IS_CHERRYVIEW(dev_priv)) {
-               chv_prepare_pll(intel_crtc, intel_crtc->config);
-               chv_enable_pll(intel_crtc, intel_crtc->config);
+               chv_prepare_pll(intel_crtc, pipe_config);
+               chv_enable_pll(intel_crtc, pipe_config);
        } else {
-               vlv_prepare_pll(intel_crtc, intel_crtc->config);
-               vlv_enable_pll(intel_crtc, intel_crtc->config);
+               vlv_prepare_pll(intel_crtc, pipe_config);
+               vlv_enable_pll(intel_crtc, pipe_config);
        }
 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-       i9xx_pfit_enable(intel_crtc);
+       i9xx_pfit_enable(pipe_config);
 
        intel_color_load_luts(&pipe_config->base);
 
@@ -6135,13 +6119,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
-static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
-       I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+       I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+       I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
 }
 
 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6158,15 +6142,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
        if (WARN_ON(intel_crtc->active))
                return;
 
-       i9xx_set_pll_dividers(intel_crtc);
+       i9xx_set_pll_dividers(pipe_config);
 
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
 
-       intel_set_pipe_timings(intel_crtc);
-       intel_set_pipe_src_size(intel_crtc);
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
 
-       i9xx_set_pipeconf(intel_crtc);
+       i9xx_set_pipeconf(pipe_config);
 
        intel_crtc->active = true;
 
@@ -6177,13 +6161,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
        i9xx_enable_pll(intel_crtc, pipe_config);
 
-       i9xx_pfit_enable(intel_crtc);
+       i9xx_pfit_enable(pipe_config);
 
        intel_color_load_luts(&pipe_config->base);
 
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
-                                                    intel_crtc->config);
+                                                    pipe_config);
        else
                intel_update_watermarks(intel_crtc);
        intel_enable_pipe(pipe_config);
@@ -6194,12 +6178,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
-static void i9xx_pfit_disable(struct intel_crtc *crtc)
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       if (!crtc->config->gmch_pfit.control)
+       if (!old_crtc_state->gmch_pfit.control)
                return;
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6232,17 +6216,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
        intel_disable_pipe(old_crtc_state);
 
-       i9xx_pfit_disable(intel_crtc);
+       i9xx_pfit_disable(old_crtc_state);
 
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-       if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
+       if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
-                       i9xx_disable_pll(intel_crtc);
+                       i9xx_disable_pll(old_crtc_state);
        }
 
        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6316,7 +6300,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
 
        intel_fbc_disable(intel_crtc);
        intel_update_watermarks(intel_crtc);
-       intel_disable_shared_dpll(intel_crtc);
+       intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
 
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
@@ -6394,66 +6378,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
        }
 }
 
-int intel_connector_init(struct intel_connector *connector)
-{
-       struct intel_digital_connector_state *conn_state;
-
-       /*
-        * Allocate enough memory to hold intel_digital_connector_state,
-        * This might be a few bytes too many, but for connectors that don't
-        * need it we'll free the state and allocate a smaller one on the first
-        * succesful commit anyway.
-        */
-       conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
-       if (!conn_state)
-               return -ENOMEM;
-
-       __drm_atomic_helper_connector_reset(&connector->base,
-                                           &conn_state->base);
-
-       return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
-       struct intel_connector *connector;
-
-       connector = kzalloc(sizeof *connector, GFP_KERNEL);
-       if (!connector)
-               return NULL;
-
-       if (intel_connector_init(connector) < 0) {
-               kfree(connector);
-               return NULL;
-       }
-
-       return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free
- */
-void intel_connector_free(struct intel_connector *connector)
-{
-       kfree(to_intel_digital_connector_state(connector->base.state));
-       kfree(connector);
-}
-
-/* Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector. */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
-       enum pipe pipe = 0;
-       struct intel_encoder *encoder = connector->encoder;
-
-       return encoder->get_hw_state(encoder, &pipe);
-}
-
 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 {
        if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6564,6 +6488,9 @@ retry:
                               link_bw, &pipe_config->fdi_m_n, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+       if (ret == -EDEADLK)
+               return ret;
+
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6720,7 +6647,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                return -EINVAL;
        }
 
-       if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+       if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+            pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+            pipe_config->base.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
@@ -6886,12 +6815,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 }
 
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
-                                        struct intel_link_m_n *m_n)
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6899,25 +6828,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
 }
 
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-                                        struct intel_link_m_n *m_n,
-                                        struct intel_link_m_n *m2_n2)
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+                                enum transcoder transcoder)
+{
+       if (IS_HASWELL(dev_priv))
+               return transcoder == TRANSCODER_EDP;
+
+       /*
+        * Strictly speaking some registers are available before
+        * gen7, but we only support DRRS on gen7+
+        */
+       return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+}
+
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n,
+                                        const struct intel_link_m_n *m2_n2)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int pipe = crtc->pipe;
-       enum transcoder transcoder = crtc->config->cpu_transcoder;
+       enum pipe pipe = crtc->pipe;
+       enum transcoder transcoder = crtc_state->cpu_transcoder;
 
        if (INTEL_GEN(dev_priv) >= 5) {
                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-               /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
-                * for gen < 8) and if DRRS is supported (to make sure the
-                * registers are not unnecessarily accessed).
+               /*
+                * The M2_N2 registers are written only if DRRS is supported
+                * (to make sure the registers are not unnecessarily accessed).
                 */
-               if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
-                   INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
+               if (m2_n2 && crtc_state->has_drrs &&
+                   transcoder_has_m2_n2(dev_priv, transcoder)) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6932,29 +6875,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
        }
 }
 
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
 {
-       struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+       const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
 
        if (m_n == M1_N1) {
-               dp_m_n = &crtc->config->dp_m_n;
-               dp_m2_n2 = &crtc->config->dp_m2_n2;
+               dp_m_n = &crtc_state->dp_m_n;
+               dp_m2_n2 = &crtc_state->dp_m2_n2;
        } else if (m_n == M2_N2) {
 
                /*
                 * M2_N2 registers are not supported. Hence m2_n2 divider value
                 * needs to be programmed into M1_N1.
                 */
-               dp_m_n = &crtc->config->dp_m2_n2;
+               dp_m_n = &crtc_state->dp_m2_n2;
        } else {
                DRM_ERROR("Unsupported divider value\n");
                return;
        }
 
-       if (crtc->config->has_pch_encoder)
-               intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+       if (crtc_state->has_pch_encoder)
+               intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
        else
-               intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+               intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
 }
 
 static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7053,8 +6996,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
-           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
-           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
+           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
@@ -7081,7 +7024,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-       if (intel_crtc_has_dp_encoder(crtc->config))
+       if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -7360,12 +7303,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
        crtc_state->dpll_hw_state.dpll = dpll;
 }
 
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       enum pipe pipe = intel_crtc->pipe;
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-       const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
        uint32_t crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;
 
@@ -7379,7 +7323,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;
 
-               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7421,18 +7365,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 
 }
 
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
        /* pipesrc controls the size that is scaled from, which should
         * always be the user's requested size.
         */
        I915_WRITE(PIPESRC(pipe),
-                  ((intel_crtc->config->pipe_src_w - 1) << 16) |
-                  (intel_crtc->config->pipe_src_h - 1));
+                  ((crtc_state->pipe_src_w - 1) << 16) |
+                  (crtc_state->pipe_src_h - 1));
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7508,29 +7452,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
        drm_mode_set_name(mode);
 }
 
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t pipeconf;
 
        pipeconf = 0;
 
        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
-               pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+               pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
 
-       if (intel_crtc->config->double_wide)
+       if (crtc_state->double_wide)
                pipeconf |= PIPECONF_DOUBLE_WIDE;
 
        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                /* Bspec claims that we can't use dithering for 30bpp pipes. */
-               if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+               if (crtc_state->dither && crtc_state->pipe_bpp != 30)
                        pipeconf |= PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;
 
-               switch (intel_crtc->config->pipe_bpp) {
+               switch (crtc_state->pipe_bpp) {
                case 18:
                        pipeconf |= PIPECONF_6BPC;
                        break;
@@ -7546,9 +7491,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
                }
        }
 
-       if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                if (INTEL_GEN(dev_priv) < 4 ||
-                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7556,11 +7501,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
                pipeconf |= PIPECONF_PROGRESSIVE;
 
        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-            intel_crtc->config->limited_color_range)
+            crtc_state->limited_color_range)
                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
-       I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
-       POSTING_READ(PIPECONF(intel_crtc->pipe));
+       I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+       POSTING_READ(PIPECONF(crtc->pipe));
 }
 
 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7843,8 +7788,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
+
+               if (val & DISPPLANE_ROTATE_180)
+                       plane_config->rotation = DRM_MODE_ROTATE_180;
        }
 
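+       /* Only CHV pipe B planes have the horizontal mirror bit. */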
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+           val & DISPPLANE_MIRROR)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);
@@ -7916,6 +7868,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
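+/*
+ * Read back the pipe output format (RGB vs. YCbCr 4:4:4/4:2:0) from the
+ * PIPEMISC register.
+ */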
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+                                       struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+       pipe_config->lspcon_downsampling = false;
+
+       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+               u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+               if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+                       bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+                       bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+                       if (ycbcr420_enabled) {
+                               /* We support 4:2:0 in full blend mode only */
+                               if (!blend)
+                                       output = INTEL_OUTPUT_FORMAT_INVALID;
+                               else if (!(IS_GEMINILAKE(dev_priv) ||
+                                          INTEL_GEN(dev_priv) >= 10))
+                                       output = INTEL_OUTPUT_FORMAT_INVALID;
+                               else
+                                       output = INTEL_OUTPUT_FORMAT_YCBCR420;
+                       } else {
+                               /*
+                                * Currently there is no interface defined to
+                                * check user preference between RGB/YCBCR444
+                                * and YCBCR420. So the only possible case for
+                                * YCBCR444 usage is driving YCBCR420 output
+                                * with LSPCON, when pipe is configured for
+                                * YCBCR444 output and LSPCON takes care of
+                                * downsampling it.
+                                */
+                               pipe_config->lspcon_downsampling = true;
+                               output = INTEL_OUTPUT_FORMAT_YCBCR444;
+                       }
+               }
+       }
+
+       pipe_config->output_format = output;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
@@ -7928,6 +7923,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
 
@@ -8459,16 +8455,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
                lpt_init_pch_refclk(dev_priv);
 }
 
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        uint32_t val;
 
        val = 0;
 
-       switch (intel_crtc->config->pipe_bpp) {
+       switch (crtc_state->pipe_bpp) {
        case 18:
                val |= PIPECONF_6BPC;
                break;
@@ -8486,32 +8482,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
                BUG();
        }
 
-       if (intel_crtc->config->dither)
+       if (crtc_state->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-       if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;
 
-       if (intel_crtc->config->limited_color_range)
+       if (crtc_state->limited_color_range)
                val |= PIPECONF_COLOR_RANGE_SELECT;
 
        I915_WRITE(PIPECONF(pipe), val);
        POSTING_READ(PIPECONF(pipe));
 }
 
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 val = 0;
 
-       if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+       if (IS_HASWELL(dev_priv) && crtc_state->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-       if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;
@@ -8520,16 +8516,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
        POSTING_READ(PIPECONF(cpu_transcoder));
 }
 
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *config = intel_crtc->config;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 
        if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
                u32 val = 0;
 
-               switch (intel_crtc->config->pipe_bpp) {
+               switch (crtc_state->pipe_bpp) {
                case 18:
                        val |= PIPEMISC_DITHER_6_BPC;
                        break;
@@ -8547,14 +8542,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
                        BUG();
                }
 
-               if (intel_crtc->config->dither)
+               if (crtc_state->dither)
                        val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
-               if (config->ycbcr420) {
-                       val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
-                               PIPEMISC_YUV420_ENABLE |
+               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+                   crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+                       val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+                       val |= PIPEMISC_YUV420_ENABLE |
                                PIPEMISC_YUV420_MODE_FULL_BLEND;
-               }
 
                I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
        }
@@ -8765,12 +8762,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
                m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
                m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-               /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
-                * gen < 8) and if DRRS is supported (to make sure the
-                * registers are not unnecessarily read).
-                */
-               if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
-                       crtc->config->has_drrs) {
+
+               if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
                        m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
                        m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
                        m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -8913,6 +8906,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                goto error;
        }
 
+       /*
+        * DRM_MODE_ROTATE_* is counterclockwise, to stay compatible with
+        * Xrandr, while i915 HW rotation is clockwise; that's why the
+        * values are swapped here.
+        */
+       switch (val & PLANE_CTL_ROTATE_MASK) {
+       case PLANE_CTL_ROTATE_0:
+               plane_config->rotation = DRM_MODE_ROTATE_0;
+               break;
+       case PLANE_CTL_ROTATE_90:
+               plane_config->rotation = DRM_MODE_ROTATE_270;
+               break;
+       case PLANE_CTL_ROTATE_180:
+               plane_config->rotation = DRM_MODE_ROTATE_180;
+               break;
+       case PLANE_CTL_ROTATE_270:
+               plane_config->rotation = DRM_MODE_ROTATE_90;
+               break;
+       }
+
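+       /* Gen10+ adds a separate horizontal flip bit on top of the rotation. */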
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           val & PLANE_CTL_FLIP_HORIZONTAL)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
        plane_config->base = base;
 
@@ -8979,6 +8995,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
 
@@ -9327,30 +9344,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
        u32 temp;
 
        /* TODO: TBT pll not implemented. */
-       switch (port) {
-       case PORT_A:
-       case PORT_B:
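+       /* Combo PHY ports use DPLL0/1, Type-C ports the per-port MG PLLs. */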
+       if (intel_port_is_combophy(dev_priv, port)) {
                temp = I915_READ(DPCLKA_CFGCR0_ICL) &
                       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
                id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
 
-               if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+               if (WARN_ON(!intel_dpll_is_combophy(id)))
                        return;
-               break;
-       case PORT_C:
-               id = DPLL_ID_ICL_MGPLL1;
-               break;
-       case PORT_D:
-               id = DPLL_ID_ICL_MGPLL2;
-               break;
-       case PORT_E:
-               id = DPLL_ID_ICL_MGPLL3;
-               break;
-       case PORT_F:
-               id = DPLL_ID_ICL_MGPLL4;
-               break;
-       default:
-               MISSING_CASE(port);
+       } else if (intel_port_is_tc(dev_priv, port)) {
+               id = icl_port_to_mg_pll_id(port);
+       } else {
+               WARN(1, "Invalid port %x\n", port);
                return;
        }
 
@@ -9613,27 +9617,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        }
 
        intel_get_pipe_src_size(crtc, pipe_config);
+       intel_get_crtc_ycbcr_config(crtc, pipe_config);
 
        pipe_config->gamma_mode =
                I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
-       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
-               u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
-               bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
-               if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-                       bool blend_mode_420 = tmp &
-                                             PIPEMISC_YUV420_MODE_FULL_BLEND;
-
-                       pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
-                       if (pipe_config->ycbcr420 != clrspace_yuv ||
-                           pipe_config->ycbcr420 != blend_mode_420)
-                               DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
-               } else if (clrspace_yuv) {
-                       DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
-               }
-       }
-
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                power_domain_mask |= BIT_ULL(power_domain);
@@ -9902,8 +9890,6 @@ static void i845_update_cursor(struct intel_plane *plane,
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }
 
-       POSTING_READ_FW(CURCNTR(PIPE_A));
-
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -10132,8 +10118,6 @@ static void i9xx_update_cursor(struct intel_plane *plane,
                I915_WRITE_FW(CURBASE(pipe), base);
        }
 
-       POSTING_READ_FW(CURBASE(pipe));
-
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -10738,14 +10722,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
                pipe_config->fb_bits |= plane->frontbuffer_bit;
 
        /*
+        * ILK/SNB DVSACNTR/Sprite Enable
+        * IVB SPR_CTL/Sprite Enable
+        * "When in Self Refresh Big FIFO mode, a write to enable the
+        *  plane will be internally buffered and delayed while Big FIFO
+        *  mode is exiting."
+        *
+        * This means that enabling the sprite can take an extra frame
+        * when we start in big FIFO mode (LP1+). Thus we need to drop
+        * down to LP0 and wait for vblank in order to make sure the
+        * sprite gets enabled on the next vblank after the register write.
+        * Doing otherwise would risk enabling the sprite one frame after
+        * we've already signalled flip completion. We can resume LP1+
+        * once the sprite has been enabled.
+        *
         * WaCxSRDisabledForSpriteScaling:ivb
+        * IVB SPR_SCALE/Scaling Enable
+        * "Low Power watermarks must be disabled for at least one
+        *  frame before enabling sprite scaling, and kept disabled
+        *  until sprite scaling is disabled."
         *
-        * cstate->update_wm was already set above, so this flag will
-        * take effect when we commit and program watermarks.
+        * ILK/SNB DVSASCALE/Scaling Enable
+        * "When in Self Refresh Big FIFO mode, scaling enable will be
+        *  masked off while Big FIFO mode is exiting."
+        *
+        * Despite the w/a only being listed for IVB, we assume that
+        * the ILK/SNB note has similar ramifications, and hence apply
+        * the w/a on all three platforms.
         */
-       if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
-           needs_scaling(to_intel_plane_state(plane_state)) &&
-           !needs_scaling(old_plane_state))
+       if (plane->id == PLANE_SPRITE0 &&
+           (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+            IS_IVYBRIDGE(dev_priv)) &&
+           (turn_on || (!needs_scaling(old_plane_state) &&
+                        needs_scaling(to_intel_plane_state(plane_state)))))
                pipe_config->disable_lp_wm = true;
 
        return 0;
@@ -10781,6 +10791,98 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
        return true;
 }
 
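+/*
+ * Whenever a plane has a linked NV12 Y/UV slave, pull the slave's state
+ * into the atomic state too, so master and slave are always checked and
+ * programmed together.
+ */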
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+       struct intel_plane *plane, *linked;
+       struct intel_plane_state *plane_state, *linked_plane_state;
+       int i;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               linked = plane_state->linked_plane;
+
+               if (!linked)
+                       continue;
+
+               linked_plane_state = intel_atomic_get_plane_state(state, linked);
+               if (IS_ERR(linked_plane_state))
+                       return PTR_ERR(linked_plane_state);
+
+               WARN_ON(linked_plane_state->linked_plane != plane);
+               WARN_ON(linked_plane_state->slave == plane_state->slave);
+       }
+
+       return 0;
+}
+
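+/*
+ * On gen11 an NV12 framebuffer is scanned out as a UV plane plus a linked
+ * Y plane, so every NV12 plane in the state must be paired with a currently
+ * unused Y-capable plane.
+ */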
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+       struct intel_plane *plane, *linked;
+       struct intel_plane_state *plane_state;
+       int i;
+
+       if (INTEL_GEN(dev_priv) < 11)
+               return 0;
+
+       /*
+        * Destroy all old plane links and make the slave plane invisible
+        * in the crtc_state->active_planes mask.
+        */
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+                       continue;
+
+               plane_state->linked_plane = NULL;
+               if (plane_state->slave && !plane_state->base.visible)
+                       crtc_state->active_planes &= ~BIT(plane->id);
+
+               plane_state->slave = false;
+       }
+
+       if (!crtc_state->nv12_planes)
+               return 0;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               struct intel_plane_state *linked_state = NULL;
+
+               if (plane->pipe != crtc->pipe ||
+                   !(crtc_state->nv12_planes & BIT(plane->id)))
+                       continue;
+
+               for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+                       if (!icl_is_nv12_y_plane(linked->id))
+                               continue;
+
+                       if (crtc_state->active_planes & BIT(linked->id))
+                               continue;
+
+                       linked_state = intel_atomic_get_plane_state(state, linked);
+                       if (IS_ERR(linked_state))
+                               return PTR_ERR(linked_state);
+
+                       break;
+               }
+
+               if (!linked_state) {
+                       DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+                                     hweight8(crtc_state->nv12_planes));
+
+                       return -EINVAL;
+               }
+
+               plane_state->linked_plane = linked;
+
+               linked_state->slave = true;
+               linked_state->linked_plane = plane;
+               crtc_state->active_planes |= BIT(linked->id);
+               DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+       }
+
+       return 0;
+}
+
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
 {
@@ -10789,7 +10891,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
-       struct drm_atomic_state *state = crtc_state->state;
        int ret;
        bool mode_changed = needs_modeset(crtc_state);
 
@@ -10826,8 +10927,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                }
        }
 
-       if (dev_priv->display.compute_intermediate_wm &&
-           !to_intel_atomic_state(state)->skip_intermediate_wm) {
+       if (dev_priv->display.compute_intermediate_wm) {
                if (WARN_ON(!dev_priv->display.compute_pipe_wm))
                        return 0;
 
@@ -10843,15 +10943,14 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
-       } else if (dev_priv->display.compute_intermediate_wm) {
-               if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-                       pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
        }
 
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed)
                        ret = skl_update_scaler_crtc(pipe_config);
 
+               if (!ret)
+                       ret = icl_check_nv12_planes(pipe_config);
                if (!ret)
                        ret = skl_check_pipe_max_pixel_rate(intel_crtc,
                                                            pipe_config);
@@ -10867,8 +10966,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-       .atomic_begin = intel_begin_crtc_commit,
-       .atomic_flush = intel_finish_crtc_commit,
        .atomic_check = intel_crtc_atomic_check,
 };
 
@@ -10897,30 +10994,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
        drm_connector_list_iter_end(&conn_iter);
 }
 
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
-                          struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+                     struct intel_crtc_state *pipe_config)
 {
-       const struct drm_display_info *info = &connector->base.display_info;
-       int bpp = pipe_config->pipe_bpp;
+       struct drm_connector *connector = conn_state->connector;
+       const struct drm_display_info *info = &connector->display_info;
+       int bpp;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
-                     connector->base.base.id,
-                     connector->base.name);
-
-       /* Don't use an invalid EDID bpc value */
-       if (info->bpc != 0 && info->bpc * 3 < bpp) {
-               DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
-                             bpp, info->bpc * 3);
-               pipe_config->pipe_bpp = info->bpc * 3;
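+       /*
+        * max_bpc folds together the EDID-reported bpc and the connector's
+        * "max bpc" property; round it down to a bpp the pipe supports.
+        */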
+       switch (conn_state->max_bpc) {
+       case 6 ... 7:
+               bpp = 6 * 3;
+               break;
+       case 8 ... 9:
+               bpp = 8 * 3;
+               break;
+       case 10 ... 11:
+               bpp = 10 * 3;
+               break;
+       case 12:
+               bpp = 12 * 3;
+               break;
+       default:
+               return -EINVAL;
        }
 
-       /* Clamp bpp to 8 on screens without EDID 1.4 */
-       if (info->bpc == 0 && bpp > 24) {
-               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-                             bpp);
-               pipe_config->pipe_bpp = 24;
+       if (bpp < pipe_config->pipe_bpp) {
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+                             connector->base.id, connector->name,
+                             bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+                             pipe_config->pipe_bpp);
+
+               pipe_config->pipe_bpp = bpp;
        }
+
+       return 0;
 }
 
 static int
@@ -10928,7 +11037,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
                          struct intel_crtc_state *pipe_config)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct drm_atomic_state *state;
+       struct drm_atomic_state *state = pipe_config->base.state;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int bpp, i;
@@ -10941,21 +11050,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
        else
                bpp = 8*3;
 
-
        pipe_config->pipe_bpp = bpp;
 
-       state = pipe_config->base.state;
-
-       /* Clamp display bpp to EDID value */
+       /* Clamp display bpp to connector max bpp */
        for_each_new_connector_in_state(state, connector, connector_state, i) {
+               int ret;
+
                if (connector_state->crtc != &crtc->base)
                        continue;
 
-               connected_sink_compute_bpp(to_intel_connector(connector),
-                                          pipe_config);
+               ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+               if (ret)
+                       return ret;
        }
 
-       return bpp;
+       return 0;
 }
 
 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11025,6 +11134,20 @@ static void snprintf_output_types(char *buf, size_t len,
        WARN_ON_ONCE(output_types != 0);
 }
 
+static const char * const output_format_str[] = {
+       [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+       [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+       [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+       [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
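+/* Map an intel_output_format to a human-readable name for debug output. */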
+static const char *output_formats(enum intel_output_format format)
+{
+       if (format >= ARRAY_SIZE(output_format_str))
+               format = INTEL_OUTPUT_FORMAT_INVALID;
+       return output_format_str[format];
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
@@ -11044,6 +11167,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
                      buf, pipe_config->output_types);
 
+       DRM_DEBUG_KMS("output format: %s\n",
+                     output_formats(pipe_config->output_format));
+
        DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                      transcoder_name(pipe_config->cpu_transcoder),
                      pipe_config->pipe_bpp, pipe_config->dither);
@@ -11053,9 +11179,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);
 
-       if (pipe_config->ycbcr420)
-               DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11244,7 +11367,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
-       int base_bpp, ret = -EINVAL;
+       int base_bpp, ret;
        int i;
        bool retry = true;
 
@@ -11266,10 +11389,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
-       base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-                                            pipe_config);
-       if (base_bpp < 0)
-               goto fail;
+       ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+                                       pipe_config);
+       if (ret)
+               return ret;
+
+       base_bpp = pipe_config->pipe_bpp;
 
        /*
         * Determine the real pipe dimensions. Note that stereo modes can
@@ -11291,7 +11416,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-                       goto fail;
+                       return -EINVAL;
                }
 
                /*
@@ -11327,7 +11452,7 @@ encoder_retry:
 
                if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
-                       goto fail;
+                       return -EINVAL;
                }
        }
 
@@ -11338,16 +11463,16 @@ encoder_retry:
                        * pipe_config->pixel_multiplier;
 
        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+       if (ret == -EDEADLK)
+               return ret;
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
-               goto fail;
+               return ret;
        }
 
        if (ret == RETRY) {
-               if (WARN(!retry, "loop in pipe configuration computation\n")) {
-                       ret = -EINVAL;
-                       goto fail;
-               }
+               if (WARN(!retry, "loop in pipe configuration computation\n"))
+                       return -EINVAL;
 
                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
@@ -11363,8 +11488,7 @@ encoder_retry:
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-fail:
-       return ret;
+       return 0;
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11633,6 +11757,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
 
        PIPE_CONF_CHECK_I(pixel_multiplier);
+       PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11641,7 +11766,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
-       PIPE_CONF_CHECK_BOOL(ycbcr420);
 
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
 
@@ -12150,8 +12274,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
        verify_disabled_dpll_state(dev);
 }
 
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
        /*
@@ -12182,7 +12307,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
         * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
-               const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+               const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
                int vtotal;
 
                vtotal = adjusted_mode->crtc_vtotal;
@@ -12191,7 +12316,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
-                  intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else
                crtc->scanline_offset = 1;
@@ -12474,6 +12599,8 @@ static int intel_atomic_check(struct drm_device *dev,
                }
 
                ret = intel_modeset_pipe_config(crtc, pipe_config);
+               if (ret == -EDEADLK)
+                       return ret;
                if (ret) {
                        intel_dump_pipe_config(to_intel_crtc(crtc),
                                               pipe_config, "[failed]");
@@ -12505,6 +12632,10 @@ static int intel_atomic_check(struct drm_device *dev,
                intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }
 
+       ret = icl_add_linked_planes(intel_state);
+       if (ret)
+               return ret;
+
        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;
@@ -12537,6 +12668,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
        bool modeset = needs_modeset(new_crtc_state);
        struct intel_plane_state *new_plane_state =
@@ -12544,7 +12676,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
                                                 to_intel_plane(crtc->primary));
 
        if (modeset) {
-               update_scanline_offset(intel_crtc);
+               update_scanline_offset(pipe_config);
                dev_priv->display.crtc_enable(pipe_config, state);
 
                /* vblanks work again, re-enable pipe CRC. */
@@ -12557,7 +12689,12 @@ static void intel_update_crtc(struct drm_crtc *crtc,
        if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
 
-       drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
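+       /*
+        * Open-coded drm_atomic_helper_commit_planes_on_crtc(), so the plane
+        * updates can be driven from the intel atomic state.
+        */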
+       intel_begin_crtc_commit(crtc, old_crtc_state);
+
+       intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
+                                   old_intel_cstate, pipe_config);
+
+       intel_finish_crtc_commit(crtc, old_crtc_state);
 }
 
 static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12589,13 +12726,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
-       const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+       struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->active)
-                       entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+                       entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12621,14 +12757,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
                        if (updated & cmask || !cstate->base.active)
                                continue;
 
-                       if (skl_ddb_allocation_overlaps(dev_priv,
+                       if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
                                                        entries,
-                                                       &cstate->wm.skl.ddb,
-                                                       i))
+                                                       INTEL_INFO(dev_priv)->num_pipes, i))
                                continue;
 
                        updated |= cmask;
-                       entries[i] = &cstate->wm.skl.ddb;
+                       entries[i] = cstate->wm.skl.ddb;
 
                        /*
                         * If this is an already active pipe, it's DDB changed,
@@ -12718,8 +12853,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
        struct drm_crtc *crtc;
-       struct intel_crtc_state *intel_cstate;
+       struct intel_crtc *intel_crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        int i;
 
@@ -12731,24 +12867,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+               intel_crtc = to_intel_crtc(crtc);
 
                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {
 
-                       put_domains[to_intel_crtc(crtc)->pipe] =
+                       put_domains[intel_crtc->pipe] =
                                modeset_get_crtc_power_domains(crtc,
-                                       to_intel_crtc_state(new_crtc_state));
+                                       new_intel_crtc_state);
                }
 
                if (!needs_modeset(new_crtc_state))
                        continue;
 
-               intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-                                      to_intel_crtc_state(new_crtc_state));
+               intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
 
                if (old_crtc_state->active) {
-                       intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+                       intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
 
                        /*
                         * We need to disable pipe CRC before disabling the pipe,
@@ -12756,10 +12893,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                         */
                        intel_crtc_disable_pipe_crc(intel_crtc);
 
-                       dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+                       dev_priv->display.crtc_disable(old_intel_crtc_state, state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
-                       intel_disable_shared_dpll(intel_crtc);
+                       intel_disable_shared_dpll(old_intel_crtc_state);
 
                        /*
                         * Underruns don't always raise
@@ -12768,17 +12905,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                        intel_check_cpu_fifo_underruns(dev_priv);
                        intel_check_pch_fifo_underruns(dev_priv);
 
-                       if (!new_crtc_state->active) {
-                               /*
-                                * Make sure we don't call initial_watermarks
-                                * for ILK-style watermark updates.
-                                *
-                                * No clue what this is supposed to achieve.
-                                */
-                               if (INTEL_GEN(dev_priv) >= 9)
-                                       dev_priv->display.initial_watermarks(intel_state,
-                                                                            to_intel_crtc_state(new_crtc_state));
-                       }
+                       /* FIXME unify this for all platforms */
+                       if (!new_crtc_state->active &&
+                           !HAS_GMCH_DISPLAY(dev_priv) &&
+                           dev_priv->display.initial_watermarks)
+                               dev_priv->display.initial_watermarks(intel_state,
+                                                                    new_intel_crtc_state);
                }
        }
 
@@ -12837,11 +12969,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               intel_cstate = to_intel_crtc_state(new_crtc_state);
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
 
                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(intel_state,
-                                                             intel_cstate);
+                                                             new_intel_crtc_state);
        }
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13224,13 +13356,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
        ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-       fb_obj_bump_render_priority(obj);
-
        mutex_unlock(&dev_priv->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
        if (ret)
                return ret;
 
+       fb_obj_bump_render_priority(obj);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
 
        if (!new_state->fence) { /* implicit fencing */
@@ -13361,7 +13492,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
        if (intel_cstate->update_pipe)
                intel_update_pipe_config(old_intel_cstate, intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
-               skl_detach_scalers(intel_crtc);
+               skl_detach_scalers(intel_cstate);
 
 out:
        if (dev_priv->display.atomic_update_watermarks)
@@ -13463,56 +13594,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
        }
 }
 
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
-                                          u32 format, u64 modifier)
-{
-       struct intel_plane *plane = to_intel_plane(_plane);
-
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               break;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               if (!plane->has_ccs)
-                       return false;
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_ABGR8888:
-               if (is_ccs_modifier(modifier))
-                       return true;
-               /* fall through */
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_NV12:
-               if (modifier == I915_FORMAT_MOD_Yf_TILED)
-                       return true;
-               /* fall through */
-       case DRM_FORMAT_C8:
-               if (modifier == DRM_FORMAT_MOD_LINEAR ||
-                   modifier == I915_FORMAT_MOD_X_TILED ||
-                   modifier == I915_FORMAT_MOD_Y_TILED)
-                       return true;
-               /* fall through */
-       default:
-               return false;
-       }
-}
-
 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
                                              u32 format, u64 modifier)
 {
@@ -13520,18 +13601,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
                format == DRM_FORMAT_ARGB8888;
 }
 
-static struct drm_plane_funcs skl_plane_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_get_property = intel_plane_atomic_get_property,
-       .atomic_set_property = intel_plane_atomic_set_property,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
@@ -13542,7 +13612,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
        .format_mod_supported = i965_plane_format_mod_supported,
 };
 
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
@@ -13568,14 +13638,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *old_fb;
-       struct drm_crtc_state *crtc_state = crtc->state;
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->state);
+       struct intel_crtc_state *new_crtc_state;
 
        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
-       if (!crtc_state->active || needs_modeset(crtc_state) ||
-           to_intel_crtc_state(crtc_state)->update_pipe)
+       if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+           crtc_state->update_pipe)
                goto slow;
 
        old_plane_state = plane->state;
@@ -13605,6 +13677,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        if (!new_plane_state)
                return -ENOMEM;
 
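+       /*
+        * Duplicate the crtc state as scratch space for the plane check;
+        * see the comment below for why it is never swapped in directly.
+        */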
+       new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+       if (!new_crtc_state) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+
        drm_atomic_set_fb_for_plane(new_plane_state, fb);
 
        new_plane_state->src_x = src_x;
@@ -13616,9 +13694,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        new_plane_state->crtc_w = crtc_w;
        new_plane_state->crtc_h = crtc_h;
 
-       ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
-                                                 to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
-                                                 to_intel_plane_state(plane->state),
+       ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+                                                 to_intel_plane_state(old_plane_state),
                                                  to_intel_plane_state(new_plane_state));
        if (ret)
                goto out_free;
@@ -13640,10 +13717,21 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        /* Swap plane state */
        plane->state = new_plane_state;
 
+       /*
+        * We cannot swap crtc_state as it may be in use by an atomic commit or
+        * page flip that's running simultaneously. If we swap crtc_state and
+        * destroy the old state, we will cause a use-after-free there.
+        *
+        * Only update active_planes, which is needed for our internal
+        * bookkeeping. Either value will do the right thing when updating
+        * planes atomically. If the cursor was part of the atomic update then
+        * we would have taken the slowpath.
+        */
+       crtc_state->active_planes = new_crtc_state->active_planes;
+
        if (plane->state->visible) {
                trace_intel_update_plane(plane, to_intel_crtc(crtc));
-               intel_plane->update_plane(intel_plane,
-                                         to_intel_crtc_state(crtc->state),
+               intel_plane->update_plane(intel_plane, crtc_state,
                                          to_intel_plane_state(plane->state));
        } else {
                trace_intel_disable_plane(plane, to_intel_crtc(crtc));
@@ -13655,6 +13743,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
 out_free:
+       if (new_crtc_state)
+               intel_crtc_destroy_state(crtc, &new_crtc_state->base);
        if (ret)
                intel_plane_destroy_state(plane, new_plane_state);
        else
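
The hunk above is worth pausing on: the duplicated CRTC state exists only to give intel_plane_atomic_check_with_state() a scratch copy to work on, and it is always destroyed again; only active_planes is copied back, precisely because the live crtc->state may still be referenced by a concurrent commit. Below is a minimal stand-alone sketch of that ownership pattern. All types and helpers (crtc_state, duplicate_crtc_state(), check_plane()) are illustrative stand-ins, not the i915 API, and old-state cleanup is omitted for brevity.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the i915 types. */
struct crtc_state { unsigned int active_planes; };
struct plane_state { int visible; };
struct crtc { struct crtc_state *state; };
struct plane { struct plane_state *state; int id; };

static struct crtc_state *duplicate_crtc_state(const struct crtc_state *s)
{
	struct crtc_state *dup = malloc(sizeof(*dup));

	if (dup)
		memcpy(dup, s, sizeof(*dup));
	return dup;
}

/* Pretend atomic check: recompute active_planes in the scratch copy. */
static int check_plane(struct crtc_state *scratch, const struct plane *plane,
		       const struct plane_state *new_state)
{
	if (new_state->visible)
		scratch->active_planes |= 1u << plane->id;
	else
		scratch->active_planes &= ~(1u << plane->id);
	return 0;
}

static int cursor_fast_update(struct crtc *crtc, struct plane *plane,
			      struct plane_state *new_state)
{
	struct crtc_state *scratch = duplicate_crtc_state(crtc->state);
	int ret;

	if (!scratch)
		return -ENOMEM;

	ret = check_plane(scratch, plane, new_state);
	if (ret == 0) {
		/* Swap the plane state only; crtc->state may still be in
		 * use by a concurrent commit, so copy the one derived
		 * field back instead of swapping the whole state. */
		plane->state = new_state;
		crtc->state->active_planes = scratch->active_planes;
	}

	free(scratch); /* the scratch copy never becomes crtc->state */
	return ret;
}

int main(void)
{
	struct crtc_state cs = { .active_planes = 0 };
	struct crtc crtc = { .state = &cs };
	struct plane_state old = { .visible = 0 }, new_ps = { .visible = 1 };
	struct plane cursor = { .state = &old, .id = 3 };

	return cursor_fast_update(&crtc, &cursor, &new_ps);
}
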
@@ -13695,176 +13785,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
                return i9xx_plane == PLANE_A;
 }
 
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, enum plane_id plane_id)
-{
-       if (!HAS_FBC(dev_priv))
-               return false;
-
-       return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, enum plane_id plane_id)
-{
-       /*
-        * FIXME: ICL requires two hardware planes for scanning out NV12
-        * framebuffers. Do not advertize support until this is implemented.
-        */
-       if (INTEL_GEN(dev_priv) >= 11)
-               return false;
-
-       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-               return false;
-
-       if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
-               return false;
-
-       if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
-               return false;
-
-       return true;
-}
-
 static struct intel_plane *
 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-       struct intel_plane *primary = NULL;
-       struct intel_plane_state *state = NULL;
+       struct intel_plane *plane;
        const struct drm_plane_funcs *plane_funcs;
-       const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
-       unsigned int num_formats;
-       const uint64_t *modifiers;
+       unsigned int possible_crtcs;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
        int ret;
 
-       primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-       if (!primary) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       state = intel_create_plane_state(&primary->base);
-       if (!state) {
-               ret = -ENOMEM;
-               goto fail;
-       }
+       if (INTEL_GEN(dev_priv) >= 9)
+               return skl_universal_plane_create(dev_priv, pipe,
+                                                 PLANE_PRIMARY);
 
-       primary->base.state = &state->base;
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               state->scaler_id = -1;
-       primary->pipe = pipe;
+       plane->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
-               primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
-       else
-               primary->i9xx_plane = (enum i9xx_plane_id) pipe;
-       primary->id = PLANE_PRIMARY;
-       primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               primary->has_fbc = skl_plane_has_fbc(dev_priv,
-                                                    primary->pipe,
-                                                    primary->id);
+               plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
-               primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
-                                                     primary->i9xx_plane);
+               plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+       plane->id = PLANE_PRIMARY;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-       if (primary->has_fbc) {
+       plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+       if (plane->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;
 
-               fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-                                                    PLANE_PRIMARY);
-
-               if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
-                       intel_primary_formats = skl_pri_planar_formats;
-                       num_formats = ARRAY_SIZE(skl_pri_planar_formats);
-               } else {
-                       intel_primary_formats = skl_primary_formats;
-                       num_formats = ARRAY_SIZE(skl_primary_formats);
-               }
-
-               if (primary->has_ccs)
-                       modifiers = skl_format_modifiers_ccs;
-               else
-                       modifiers = skl_format_modifiers_noccs;
-
-               primary->max_stride = skl_plane_max_stride;
-               primary->update_plane = skl_update_plane;
-               primary->disable_plane = skl_disable_plane;
-               primary->get_hw_state = skl_plane_get_hw_state;
-               primary->check_plane = skl_plane_check;
-
-               plane_funcs = &skl_plane_funcs;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               intel_primary_formats = i965_primary_formats;
+       if (INTEL_GEN(dev_priv) >= 4) {
+               formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
                modifiers = i9xx_format_modifiers;
 
-               primary->max_stride = i9xx_plane_max_stride;
-               primary->update_plane = i9xx_update_plane;
-               primary->disable_plane = i9xx_disable_plane;
-               primary->get_hw_state = i9xx_plane_get_hw_state;
-               primary->check_plane = i9xx_plane_check;
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = i9xx_update_plane;
+               plane->disable_plane = i9xx_disable_plane;
+               plane->get_hw_state = i9xx_plane_get_hw_state;
+               plane->check_plane = i9xx_plane_check;
 
                plane_funcs = &i965_plane_funcs;
        } else {
-               intel_primary_formats = i8xx_primary_formats;
+               formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
                modifiers = i9xx_format_modifiers;
 
-               primary->max_stride = i9xx_plane_max_stride;
-               primary->update_plane = i9xx_update_plane;
-               primary->disable_plane = i9xx_disable_plane;
-               primary->get_hw_state = i9xx_plane_get_hw_state;
-               primary->check_plane = i9xx_plane_check;
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = i9xx_update_plane;
+               plane->disable_plane = i9xx_disable_plane;
+               plane->get_hw_state = i9xx_plane_get_hw_state;
+               plane->check_plane = i9xx_plane_check;
 
                plane_funcs = &i8xx_plane_funcs;
        }
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, plane_funcs,
-                                              intel_primary_formats, num_formats,
-                                              modifiers,
-                                              DRM_PLANE_TYPE_PRIMARY,
-                                              "plane 1%c", pipe_name(pipe));
-       else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-               ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, plane_funcs,
-                                              intel_primary_formats, num_formats,
-                                              modifiers,
+       possible_crtcs = BIT(pipe);
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                              possible_crtcs, plane_funcs,
+                                              formats, num_formats, modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
-               ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, plane_funcs,
-                                              intel_primary_formats, num_formats,
-                                              modifiers,
+               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                              possible_crtcs, plane_funcs,
+                                              formats, num_formats, modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
-                                              plane_name(primary->i9xx_plane));
+                                              plane_name(plane->i9xx_plane));
        if (ret)
                goto fail;
 
-       if (INTEL_GEN(dev_priv) >= 10) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-                       DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
-                       DRM_MODE_REFLECT_X;
-       } else if (INTEL_GEN(dev_priv) >= 9) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-                       DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-       } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
@@ -13876,26 +13880,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        }
 
        if (INTEL_GEN(dev_priv) >= 4)
-               drm_plane_create_rotation_property(&primary->base,
+               drm_plane_create_rotation_property(&plane->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               drm_plane_create_color_properties(&primary->base,
-                                                 BIT(DRM_COLOR_YCBCR_BT601) |
-                                                 BIT(DRM_COLOR_YCBCR_BT709),
-                                                 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
-                                                 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
-                                                 DRM_COLOR_YCBCR_BT709,
-                                                 DRM_COLOR_YCBCR_LIMITED_RANGE);
-
-       drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
 
-       return primary;
+       return plane;
 
 fail:
-       kfree(state);
-       kfree(primary);
+       intel_plane_free(plane);
 
        return ERR_PTR(ret);
 }
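
The gen2/3 FBC comment retained above ("we want plane A feeding pipe B") is easy to misread. Assuming only that PIPE_A/PIPE_B are 0/1 and that the cross-wiring is the `!pipe` expression from the hunk, a small sketch of the resulting mapping:

#include <assert.h>

enum pipe { PIPE_A = 0, PIPE_B = 1 };
enum i9xx_plane_id { PLANE_A = 0, PLANE_B = 1 };

/* On gen2/3 only HW plane A can do FBC, and LVDS hangs off pipe B, so
 * the driver cross-wires the FBC-capable plane onto the LVDS pipe. */
static enum i9xx_plane_id plane_for_pipe(enum pipe pipe, int fbc_gen23)
{
	return (enum i9xx_plane_id)(fbc_gen23 ? !pipe : pipe);
}

int main(void)
{
	assert(plane_for_pipe(PIPE_B, 1) == PLANE_A); /* FBC plane on LVDS pipe */
	assert(plane_for_pipe(PIPE_A, 1) == PLANE_B); /* the other pair swaps too */
	assert(plane_for_pipe(PIPE_A, 0) == PLANE_A); /* 1:1 everywhere else */
	return 0;
}
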
@@ -13904,23 +13898,13 @@ static struct intel_plane *
 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                          enum pipe pipe)
 {
-       struct intel_plane *cursor = NULL;
-       struct intel_plane_state *state = NULL;
+       unsigned int possible_crtcs;
+       struct intel_plane *cursor;
        int ret;
 
-       cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-       if (!cursor) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       state = intel_create_plane_state(&cursor->base);
-       if (!state) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       cursor->base.state = &state->base;
+       cursor = intel_plane_alloc();
+       if (IS_ERR(cursor))
+               return cursor;
 
        cursor->pipe = pipe;
        cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -13947,8 +13931,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
                cursor->cursor.size = ~0;
 
+       possible_crtcs = BIT(pipe);
+
        ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-                                      0, &intel_cursor_plane_funcs,
+                                      possible_crtcs, &intel_cursor_plane_funcs,
                                       intel_cursor_formats,
                                       ARRAY_SIZE(intel_cursor_formats),
                                       cursor_format_modifiers,
@@ -13963,16 +13949,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                                                   DRM_MODE_ROTATE_0 |
                                                   DRM_MODE_ROTATE_180);
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               state->scaler_id = -1;
-
        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
        return cursor;
 
 fail:
-       kfree(state);
-       kfree(cursor);
+       intel_plane_free(cursor);
 
        return ERR_PTR(ret);
 }
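
Both plane constructors above now pass an explicit possible_crtcs of BIT(pipe) instead of 0. possible_crtcs is a bitmask of CRTC indices the plane may be bound to; on i915 each plane is tied to exactly one pipe, so the mask is one-hot. A tiny illustration, with BIT() defined as in the kernel and the pipe-equals-CRTC-index equivalence taken as the assumption:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int pipe = 1;               /* pipe B */
	uint32_t possible_crtcs = BIT(pipe); /* 0b010: CRTC index 1 only */

	for (unsigned int crtc = 0; crtc < 3; crtc++)
		printf("crtc %u: %s\n", crtc,
		       (possible_crtcs & BIT(crtc)) ? "usable" : "not usable");
	return 0;
}
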
@@ -13993,7 +13975,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                struct intel_scaler *scaler = &scaler_state->scalers[i];
 
                scaler->in_use = 0;
-               scaler->mode = PS_SCALER_MODE_DYN;
+               scaler->mode = 0;
        }
 
        scaler_state->scaler_id = -1;
@@ -14088,18 +14070,6 @@ fail:
        return ret;
 }
 
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-{
-       struct drm_device *dev = connector->base.dev;
-
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
-       if (!connector->base.state->crtc)
-               return INVALID_PIPE;
-
-       return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file)
 {
@@ -14236,6 +14206,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
+               icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
@@ -14458,7 +14429,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 
 static
 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
-                        uint64_t fb_modifier, uint32_t pixel_format)
+                        u32 pixel_format, u64 fb_modifier)
 {
        struct intel_crtc *crtc;
        struct intel_plane *plane;
@@ -14526,13 +14497,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                        goto err;
                }
                /* fall through */
-       case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
+               if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
+                       DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
+                       goto err;
+               }
+               /* fall through */
+       case I915_FORMAT_MOD_Y_TILED:
                if (INTEL_GEN(dev_priv) < 9) {
                        DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
                                      mode_cmd->modifier[0]);
                        goto err;
                }
+               break;
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
                break;
@@ -14552,8 +14529,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                goto err;
        }
 
-       pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
-                                          mode_cmd->pixel_format);
+       pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+                                          mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > pitch_limit) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14622,7 +14599,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                break;
        case DRM_FORMAT_NV12:
                if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
-                   IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
+                   IS_BROXTON(dev_priv)) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format,
                                                          &format_name));
@@ -14646,7 +14623,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
             fb->height < SKL_MIN_YUV_420_SRC_H ||
             (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
                DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
-               return -EINVAL;
+               goto err;
        }
 
        for (i = 0; i < fb->format->num_planes; i++) {
@@ -14906,174 +14883,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.update_crtcs = intel_update_crtcs;
 }
 
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
-       DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
-       DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
-       DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
- * which is 300 ms greater than eDP spec T12 min.
- */
-static void quirk_increase_t12_delay(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
-       DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time
- * this allows the onboard retimer to correctly sync to signal
- */
-static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
-       DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
-       int device;
-       int subsystem_vendor;
-       int subsystem_device;
-       void (*hook)(struct drm_device *dev);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
-       void (*hook)(struct drm_device *dev);
-       const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
-       DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
-       return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
-       {
-               .dmi_id_list = &(const struct dmi_system_id[]) {
-                       {
-                               .callback = intel_dmi_reverse_brightness,
-                               .ident = "NCR Corporation",
-                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
-                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
-                               },
-                       },
-                       { }  /* terminating entry */
-               },
-               .hook = quirk_invert_brightness,
-       },
-};
-
-static struct intel_quirk intel_quirks[] = {
-       /* Lenovo U160 cannot use SSC on LVDS */
-       { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
-       /* Sony Vaio Y cannot use SSC on LVDS */
-       { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
-       /* Acer Aspire 5734Z must invert backlight brightness */
-       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-       /* Acer/eMachines G725 */
-       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-       /* Acer/eMachines e725 */
-       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-       /* Acer/Packard Bell NCL20 */
-       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-       /* Acer Aspire 4736Z */
-       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
-       /* Acer Aspire 5336 */
-       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
-       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
-       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
-       /* Acer C720 Chromebook (Core i3 4005U) */
-       { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
-       /* Apple Macbook 2,1 (Core 2 T7400) */
-       { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
-       /* Apple Macbook 4,1 */
-       { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
-       /* Toshiba CB35 Chromebook (Celeron 2955U) */
-       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
-       /* HP Chromebook 14 (Celeron 2955U) */
-       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
-       /* Dell Chromebook 11 */
-       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
-       /* Dell Chromebook 11 (2015 version) */
-       { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
-       /* Toshiba Satellite P50-C-18C */
-       { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
-       /* GeminiLake NUC */
-       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-       /* ASRock ITX*/
-       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-static void intel_init_quirks(struct drm_device *dev)
-{
-       struct pci_dev *d = dev->pdev;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
-               struct intel_quirk *q = &intel_quirks[i];
-
-               if (d->device == q->device &&
-                   (d->subsystem_vendor == q->subsystem_vendor ||
-                    q->subsystem_vendor == PCI_ANY_ID) &&
-                   (d->subsystem_device == q->subsystem_device ||
-                    q->subsystem_device == PCI_ANY_ID))
-                       q->hook(dev);
-       }
-       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
-               if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
-                       intel_dmi_quirks[i].hook(dev);
-       }
-}
-
 /* Disable the VGA plane that we never use */
 static void i915_disable_vga(struct drm_i915_private *dev_priv)
 {
@@ -15233,6 +15042,14 @@ retry:
                        ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                goto out;
+
+                       /*
+                        * FIXME hack to force a LUT update to avoid the
+                        * plane update forcing the pipe gamma on without
+                        * having a proper LUT loaded. Remove once we
+                        * have readout for pipe gamma enable.
+                        */
+                       crtc_state->color_mgmt_changed = true;
                }
        }
 
@@ -15279,7 +15096,9 @@ int intel_modeset_init(struct drm_device *dev)
        INIT_WORK(&dev_priv->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);
 
-       intel_init_quirks(dev);
+       intel_init_quirks(dev_priv);
+
+       intel_fbc_init(dev_priv);
 
        intel_init_pm(dev_priv);
 
@@ -15511,8 +15330,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
                if (pipe == crtc->pipe)
                        continue;
 
-               DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
-                             plane->base.name);
+               DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+                             plane->base.base.id, plane->base.name);
 
                plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15553,7 +15372,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+       struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15563,7 +15383,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }
 
-       if (crtc->active) {
+       if (crtc_state->base.active) {
                struct intel_plane *plane;
 
                /* Disable everything but the primary plane */
@@ -15579,10 +15399,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
-       if (crtc->active && !intel_crtc_has_encoders(crtc))
+       if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);
 
-       if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+       if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
@@ -15613,6 +15433,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
 
        /* We need to check both for a crtc link (meaning that the
@@ -15636,7 +15457,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
-                       encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                       if (encoder->disable)
+                               encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                }
@@ -15653,6 +15475,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_sanitize_encoder_pll_mapping(encoder);
 }
 
 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15701,6 +15526,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
                crtc_state = to_intel_crtc_state(crtc->base.state);
 
                intel_set_plane_visible(crtc_state, plane_state, visible);
+
+               DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+                             plane->base.base.id, plane->base.name,
+                             enableddisabled(visible), pipe_name(pipe));
        }
 
        for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15853,7 +15682,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
                        drm_calc_timestamping_constants(&crtc->base,
                                                        &crtc_state->base.adjusted_mode);
-                       update_scanline_offset(crtc);
+                       update_scanline_offset(crtc_state);
                }
 
                dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15908,6 +15737,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
        }
 }
 
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+                                      enum port port, i915_reg_t hdmi_reg)
+{
+       u32 val = I915_READ(hdmi_reg);
+
+       if (val & SDVO_ENABLE ||
+           (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+               return;
+
+       DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+                     port_name(port));
+
+       val &= ~SDVO_PIPE_SEL_MASK;
+       val |= SDVO_PIPE_SEL(PIPE_A);
+
+       I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+                                    enum port port, i915_reg_t dp_reg)
+{
+       u32 val = I915_READ(dp_reg);
+
+       if (val & DP_PORT_EN ||
+           (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+               return;
+
+       DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+                     port_name(port));
+
+       val &= ~DP_PIPE_SEL_MASK;
+       val |= DP_PIPE_SEL(PIPE_A);
+
+       I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The BIOS may select transcoder B on some of the PCH
+        * ports even if it doesn't enable the port. This would trip
+        * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+        * Sanitize the transcoder select bits to prevent that. We
+        * assume that the BIOS never actually enabled the port,
+        * because if it did we'd actually have to toggle the port
+        * on and back off to make the transcoder A select stick
+        * (see intel_dp_link_down(), intel_disable_hdmi(),
+        * intel_disable_sdvo()).
+        */
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+       /* PCH SDVOB multiplex with HDMIB */
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
 /* Scan out the current hw modeset state,
  * and sanitizes it to the current state
  */
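
The comment in ibx_sanitize_pch_ports() above packs the whole story: the assertions it names expect "port disabled implies pipe A selected", and toggling the port just to fix the select bits would be worse than rewriting them in place. Here is a stand-alone sketch of the guarded read-modify-write, with a made-up register layout (the real SDVO/DP bit positions differ):

#include <stdint.h>
#include <stdio.h>

#define PORT_ENABLE   (1u << 31)
#define PIPE_SEL_MASK (3u << 29)
#define PIPE_SEL(p)   ((uint32_t)(p) << 29)

/* If the port is off but the BIOS left a non-pipe-A select latched,
 * rewrite only the select bits; an enabled port is left alone. */
static uint32_t sanitize_pipe_select(uint32_t val)
{
	if ((val & PORT_ENABLE) || (val & PIPE_SEL_MASK) == PIPE_SEL(0))
		return val;

	val &= ~PIPE_SEL_MASK;
	val |= PIPE_SEL(0);
	return val;
}

int main(void)
{
	uint32_t bios_leftover = PIPE_SEL(1); /* disabled port, pipe B selected */

	printf("0x%08x -> 0x%08x\n", (unsigned int)bios_leftover,
	       (unsigned int)sanitize_pipe_select(bios_leftover));
	return 0;
}
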
@@ -15917,6 +15805,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
        int i;
 
@@ -15928,6 +15817,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);
 
+       if (HAS_PCH_IBX(dev_priv))
+               ibx_sanitize_pch_ports(dev_priv);
+
        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
@@ -15935,7 +15827,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                drm_crtc_vblank_reset(&crtc->base);
 
-               if (crtc->active)
+               if (crtc->base.state->active)
                        drm_crtc_vblank_on(&crtc->base);
        }
 
@@ -15945,8 +15837,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
                intel_sanitize_encoder(encoder);
 
        for_each_intel_crtc(&dev_priv->drm, crtc) {
+               crtc_state = to_intel_crtc_state(crtc->base.state);
                intel_sanitize_crtc(crtc, ctx);
-               intel_dump_pipe_config(crtc, crtc->config,
+               intel_dump_pipe_config(crtc, crtc_state,
                                       "[setup_hw_state]");
        }
 
@@ -15980,7 +15873,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;
 
-               put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+               put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
@@ -16024,29 +15918,6 @@ void intel_display_resume(struct drm_device *dev)
                drm_atomic_state_put(state);
 }
 
-int intel_connector_register(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       int ret;
-
-       ret = intel_backlight_device_register(intel_connector);
-       if (ret)
-               goto err;
-
-       return 0;
-
-err:
-       return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       intel_backlight_device_unregister(intel_connector);
-       intel_panel_destroy_backlight(connector);
-}
-
 static void intel_hpd_poll_fini(struct drm_device *dev)
 {
        struct intel_connector *connector;
@@ -16057,9 +15928,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->modeset_retry_work.func)
                        cancel_work_sync(&connector->modeset_retry_work);
-               if (connector->hdcp_shim) {
-                       cancel_delayed_work_sync(&connector->hdcp_check_work);
-                       cancel_work_sync(&connector->hdcp_prop_work);
+               if (connector->hdcp.shim) {
+                       cancel_delayed_work_sync(&connector->hdcp.check_work);
+                       cancel_work_sync(&connector->hdcp.prop_work);
                }
        }
        drm_connector_list_iter_end(&conn_iter);
@@ -16099,18 +15970,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        drm_mode_config_cleanup(dev);
 
-       intel_cleanup_overlay(dev_priv);
+       intel_overlay_cleanup(dev_priv);
 
        intel_teardown_gmbus(dev_priv);
 
        destroy_workqueue(dev_priv->modeset_wq);
-}
 
-void intel_connector_attach_encoder(struct intel_connector *connector,
-                                   struct intel_encoder *encoder)
-{
-       connector->encoder = encoder;
-       drm_connector_attach_encoder(&connector->base, &encoder->base);
+       intel_fbc_cleanup_cfb(dev_priv);
 }
 
 /*
index 9fac67e31205ff19ae9398b23bfd07a1191ac541..5f2955b944da7e2201119b6c8fa414f11534d052 100644 (file)
@@ -43,6 +43,11 @@ enum i915_gpio {
        GPIOM,
 };
 
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
 enum pipe {
        INVALID_PIPE = -1,
 
@@ -57,12 +62,25 @@ enum pipe {
 #define pipe_name(p) ((p) + 'A')
 
 enum transcoder {
-       TRANSCODER_A = 0,
-       TRANSCODER_B,
-       TRANSCODER_C,
+       /*
+        * The following transcoders have a 1:1 transcoder -> pipe mapping,
+        * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+        * rest have consecutive values and match the enum values of the pipes
+        * they map to.
+        */
+       TRANSCODER_A = PIPE_A,
+       TRANSCODER_B = PIPE_B,
+       TRANSCODER_C = PIPE_C,
+
+       /*
+        * The following transcoders can map to any pipe, their enum value
+        * doesn't need to stay fixed.
+        */
        TRANSCODER_EDP,
-       TRANSCODER_DSI_A,
-       TRANSCODER_DSI_C,
+       TRANSCODER_DSI_0,
+       TRANSCODER_DSI_1,
+       TRANSCODER_DSI_A = TRANSCODER_DSI_0,    /* legacy DSI */
+       TRANSCODER_DSI_C = TRANSCODER_DSI_1,    /* legacy DSI */
 
        I915_MAX_TRANSCODERS
 };
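
The reworked enum makes the pipe/transcoder relationship explicit: the first three transcoders share their numeric values with the pipes they drive, while the EDP and DSI transcoders float above that range and can map to any pipe. A compilable sketch of what the invariant buys; the enum bodies are condensed from the hunk, and the asserts are the illustration:

#include <assert.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, I915_MAX_PIPES };

enum transcoder {
	TRANSCODER_A = PIPE_A,	/* 1:1 with the pipe of the same name */
	TRANSCODER_B = PIPE_B,
	TRANSCODER_C = PIPE_C,
	TRANSCODER_EDP,		/* may map to any pipe */
	TRANSCODER_DSI_0,
	TRANSCODER_DSI_1,
};

int main(void)
{
	/* The fixed values allow a plain cast within the 1:1 range... */
	assert((enum transcoder)PIPE_B == TRANSCODER_B);

	/* ...and the floating transcoders sit safely above that range. */
	assert(TRANSCODER_EDP >= I915_MAX_PIPES);
	return 0;
}
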
@@ -120,6 +138,9 @@ enum plane_id {
        PLANE_SPRITE0,
        PLANE_SPRITE1,
        PLANE_SPRITE2,
+       PLANE_SPRITE3,
+       PLANE_SPRITE4,
+       PLANE_SPRITE5,
        PLANE_CURSOR,
 
        I915_MAX_PLANES,
@@ -363,7 +384,7 @@ struct intel_link_m_n {
                (__dev_priv)->power_domains.power_well_count;           \
             (__power_well)++)
 
-#define for_each_power_well_rev(__dev_priv, __power_well)                      \
+#define for_each_power_well_reverse(__dev_priv, __power_well)                  \
        for ((__power_well) = (__dev_priv)->power_domains.power_wells +         \
                              (__dev_priv)->power_domains.power_well_count - 1; \
             (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;     \
@@ -373,8 +394,8 @@ struct intel_link_m_n {
        for_each_power_well(__dev_priv, __power_well)                           \
                for_each_if((__power_well)->desc->domains & (__domain_mask))
 
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
-       for_each_power_well_rev(__dev_priv, __power_well)                       \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+       for_each_power_well_reverse(__dev_priv, __power_well)                   \
                for_each_if((__power_well)->desc->domains & (__domain_mask))
 
 #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
index 13f9b56a9ce7ca711467fc9309b720f8d9565661..7699f9b7b2d2ab0b982eb4910d8121931dc415cf 100644 (file)
 
 #define DP_DPRX_ESI_LEN 14
 
+/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
+#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER     61440
+
+/* DP DSC throughput values used for slice count calculations, in KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE                 2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0            340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1            400000
+
+/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR             976
+
 /* Compliance test status bits  */
 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
 #define INTEL_DP_RESOLUTION_PREFERRED  (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -93,6 +104,14 @@ static const struct dp_link_dpll chv_dpll[] = {
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
 };
 
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/* With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
 /**
  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
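
The two magic numbers in the constants above check out if the units are read as bits and tenths of a percent respectively: 2 FIFOs x 640 x 6 bytes is 7680 bytes, i.e. 61440 bits, and (100 - 2.4)/100 scaled by 1000 is 976. That unit reading is an inference from the values, not something the hunk states; a two-line check:

#include <assert.h>

int main(void)
{
	/* 2 FIFOs x 640 x 6 bytes, expressed in bits (inferred unit). */
	assert(2 * 640 * 6 * 8 == 61440);

	/* FEC overhead: 100% - 2.4% = 97.6%, stored as 976 per mille. */
	assert(1000 - 24 == 976);
	return 0;
}
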
@@ -222,138 +241,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
 }
 
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum port port = intel_dig_port->base.port;
-       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-       u32 ln0, ln1, lane_info;
-
-       if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
-               return;
-
-       ln0 = I915_READ(MG_DP_MODE(port, 0));
-       ln1 = I915_READ(MG_DP_MODE(port, 1));
-
-       switch (intel_dig_port->tc_type) {
-       case TC_PORT_TYPEC:
-               ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-               ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-
-               lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
-                            DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
-                           DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
-               switch (lane_info) {
-               case 0x1:
-               case 0x4:
-                       break;
-               case 0x2:
-                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
-                       break;
-               case 0x3:
-                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
-                              MG_DP_MODE_CFG_DP_X2_MODE;
-                       break;
-               case 0x8:
-                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
-                       break;
-               case 0xC:
-                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
-                              MG_DP_MODE_CFG_DP_X2_MODE;
-                       break;
-               case 0xF:
-                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
-                              MG_DP_MODE_CFG_DP_X2_MODE;
-                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
-                              MG_DP_MODE_CFG_DP_X2_MODE;
-                       break;
-               default:
-                       MISSING_CASE(lane_info);
-               }
-               break;
-
-       case TC_PORT_LEGACY:
-               ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
-               ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
-               break;
-
-       default:
-               MISSING_CASE(intel_dig_port->tc_type);
-               return;
-       }
-
-       I915_WRITE(MG_DP_MODE(port, 0), ln0);
-       I915_WRITE(MG_DP_MODE(port, 1), ln1);
-}
-
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum port port = dig_port->base.port;
-       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
-       u32 val;
-       int i;
-
-       if (tc_port == PORT_TC_NONE)
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
-               val = I915_READ(mg_regs[i]);
-               val |= MG_DP_MODE_CFG_TR2PWR_GATING |
-                      MG_DP_MODE_CFG_TRPWR_GATING |
-                      MG_DP_MODE_CFG_CLNPWR_GATING |
-                      MG_DP_MODE_CFG_DIGPWR_GATING |
-                      MG_DP_MODE_CFG_GAONPWR_GATING;
-               I915_WRITE(mg_regs[i], val);
-       }
-
-       val = I915_READ(MG_MISC_SUS0(tc_port));
-       val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
-              MG_MISC_SUS0_CFG_TR2PWR_GATING |
-              MG_MISC_SUS0_CFG_CL2PWR_GATING |
-              MG_MISC_SUS0_CFG_GAONPWR_GATING |
-              MG_MISC_SUS0_CFG_TRPWR_GATING |
-              MG_MISC_SUS0_CFG_CL1PWR_GATING |
-              MG_MISC_SUS0_CFG_DGPWR_GATING;
-       I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum port port = dig_port->base.port;
-       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
-       u32 val;
-       int i;
-
-       if (tc_port == PORT_TC_NONE)
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
-               val = I915_READ(mg_regs[i]);
-               val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
-                        MG_DP_MODE_CFG_TRPWR_GATING |
-                        MG_DP_MODE_CFG_CLNPWR_GATING |
-                        MG_DP_MODE_CFG_DIGPWR_GATING |
-                        MG_DP_MODE_CFG_GAONPWR_GATING);
-               I915_WRITE(mg_regs[i], val);
-       }
-
-       val = I915_READ(MG_MISC_SUS0(tc_port));
-       val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
-                MG_MISC_SUS0_CFG_TR2PWR_GATING |
-                MG_MISC_SUS0_CFG_CL2PWR_GATING |
-                MG_MISC_SUS0_CFG_GAONPWR_GATING |
-                MG_MISC_SUS0_CFG_TRPWR_GATING |
-                MG_MISC_SUS0_CFG_CL1PWR_GATING |
-                MG_MISC_SUS0_CFG_DGPWR_GATING);
-       I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
 int
 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 {
@@ -455,7 +342,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
-               if (INTEL_GEN(dev_priv) == 10)
+               if (IS_GEN10(dev_priv))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +503,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
+       u16 dsc_max_output_bpp = 0;
+       u8 dsc_slice_count = 0;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
@@ -641,7 +531,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);
 
-       if (mode_rate > max_rate || target_clock > max_dotclk)
+       /*
+        * Output bpp is stored in 6.4 format, so right-shift by 4 to get the
+        * integer value, since we support only integer values of bpp.
+        */
+       if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+           drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+               if (intel_dp_is_edp(intel_dp)) {
+                       dsc_max_output_bpp =
+                               drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+                       dsc_slice_count =
+                               drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+                                                               true);
+               } else {
+                       dsc_max_output_bpp =
+                               intel_dp_dsc_get_output_bpp(max_link_clock,
+                                                           max_lanes,
+                                                           target_clock,
+                                                           mode->hdisplay) >> 4;
+                       dsc_slice_count =
+                               intel_dp_dsc_get_slice_count(intel_dp,
+                                                            target_clock,
+                                                            mode->hdisplay);
+               }
+       }
+
+       if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
+           target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
        if (mode->clock < 10000)
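
The 6.4 fixed-point note above means the DPCD value is bpp times 16: six integer bits, four fractional. For instance 26.5 bpp is encoded as 26 * 16 + 8 = 0x1a8, and the >> 4 in the hunk keeps only the 26. A quick check (the example value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t bpp_6_4 = 0x1a8;      /* 26.5 bpp in 6.4 fixed point */

	assert((bpp_6_4 >> 4) == 26);  /* integer part, as used above */
	assert((bpp_6_4 & 0xf) == 8);  /* discarded fraction: 8/16 = 0.5 */
	return 0;
}
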
@@ -690,7 +606,8 @@ static void pps_lock(struct intel_dp *intel_dp)
         * See intel_power_sequencer_reset() why we need
         * a power domain reference here.
         */
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv,
+                               intel_aux_power_domain(dp_to_dig_port(intel_dp)));
 
        mutex_lock(&dev_priv->pps_mutex);
 }
@@ -701,7 +618,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
 
        mutex_unlock(&dev_priv->pps_mutex);
 
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_put(dev_priv,
+                               intel_aux_power_domain(dp_to_dig_port(intel_dp)));
 }
 
 static void
@@ -1156,6 +1074,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
        if (index)
                return 0;
@@ -1165,7 +1084,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
         * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
         * divide by 2000 and use that
         */
-       if (intel_dp->aux_ch == AUX_CH_A)
+       if (dig_port->aux_ch == AUX_CH_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
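
Both cdclk and rawclk_freq are kept in kHz here, so dividing by 2000 yields the divider that lands the AUX engine near its desired 2 MHz. A worked example with a 337.5 MHz CDCLK, a plausible value chosen purely for illustration; the DIV_ROUND_CLOSEST below is the positive-operand form of the kernel macro:

#include <assert.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int cdclk_khz = 337500;                       /* 337.5 MHz */
	int div = DIV_ROUND_CLOSEST(cdclk_khz, 2000); /* 168.75 -> 169 */

	assert(div == 169);
	/* 337500 / 169 = 1997 kHz, i.e. ~2 MHz as intended. */
	assert(cdclk_khz / div == 1997);
	return 0;
}
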
@@ -1174,8 +1093,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
-       if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+       if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
@@ -1503,80 +1423,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        return ret;
 }
 
-static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
-{
-       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       const struct ddi_vbt_port_info *info =
-               &dev_priv->vbt.ddi_port_info[port];
-       enum aux_ch aux_ch;
-
-       if (!info->alternate_aux_channel) {
-               aux_ch = (enum aux_ch) port;
-
-               DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
-                             aux_ch_name(aux_ch), port_name(port));
-               return aux_ch;
-       }
-
-       switch (info->alternate_aux_channel) {
-       case DP_AUX_A:
-               aux_ch = AUX_CH_A;
-               break;
-       case DP_AUX_B:
-               aux_ch = AUX_CH_B;
-               break;
-       case DP_AUX_C:
-               aux_ch = AUX_CH_C;
-               break;
-       case DP_AUX_D:
-               aux_ch = AUX_CH_D;
-               break;
-       case DP_AUX_E:
-               aux_ch = AUX_CH_E;
-               break;
-       case DP_AUX_F:
-               aux_ch = AUX_CH_F;
-               break;
-       default:
-               MISSING_CASE(info->alternate_aux_channel);
-               aux_ch = AUX_CH_A;
-               break;
-       }
-
-       DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
-                     aux_ch_name(aux_ch), port_name(port));
-
-       return aux_ch;
-}
-
-static enum intel_display_power_domain
-intel_aux_power_domain(struct intel_dp *intel_dp)
-{
-       switch (intel_dp->aux_ch) {
-       case AUX_CH_A:
-               return POWER_DOMAIN_AUX_A;
-       case AUX_CH_B:
-               return POWER_DOMAIN_AUX_B;
-       case AUX_CH_C:
-               return POWER_DOMAIN_AUX_C;
-       case AUX_CH_D:
-               return POWER_DOMAIN_AUX_D;
-       case AUX_CH_E:
-               return POWER_DOMAIN_AUX_E;
-       case AUX_CH_F:
-               return POWER_DOMAIN_AUX_F;
-       default:
-               MISSING_CASE(intel_dp->aux_ch);
-               return POWER_DOMAIN_AUX_A;
-       }
-}
 
 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_B:
@@ -1592,7 +1444,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_B:
@@ -1608,7 +1461,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_A:
@@ -1626,7 +1480,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_A:
@@ -1644,7 +1499,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_A:
@@ -1663,7 +1519,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum aux_ch aux_ch = intel_dp->aux_ch;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum aux_ch aux_ch = dig_port->aux_ch;
 
        switch (aux_ch) {
        case AUX_CH_A:
@@ -1689,10 +1546,8 @@ static void
 intel_dp_aux_init(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-
-       intel_dp->aux_ch = intel_aux_ch(intel_dp);
-       intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &dig_port->base;
 
        if (INTEL_GEN(dev_priv) >= 9) {
                intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1951,6 +1806,42 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
        return false;
 }
 
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static bool
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+                                 struct intel_crtc_state *pipe_config,
+                                 const struct link_config_limits *limits)
+{
+       struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int bpp, clock, lane_count;
+       int mode_rate, link_clock, link_avail;
+
+       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+                                                  bpp);
+
+               for (lane_count = limits->min_lane_count;
+                    lane_count <= limits->max_lane_count;
+                    lane_count <<= 1) {
+                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+                               link_clock = intel_dp->common_rates[clock];
+                               link_avail = intel_dp_max_data_rate(link_clock,
+                                                                   lane_count);
+
+                               if (mode_rate <= link_avail) {
+                                       pipe_config->lane_count = lane_count;
+                                       pipe_config->pipe_bpp = bpp;
+                                       pipe_config->port_clock = link_clock;
+
+                                       return true;
+                               }
+                       }
+               }
+       }
+
+       return false;
+}
+
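The ordering above is worth spelling out: bpp descends from the panel maximum while lane count and link rate ascend, so the first match is the highest bpp on the narrowest, slowest link that still carries the mode. A minimal standalone sketch of that same ordering follows; the rate table and the two bandwidth helpers are illustrative stand-ins, not the driver's, and only the loop structure mirrors the patch.

/*
 * Sketch of the "fast and narrow" search order used by
 * intel_dp_compute_link_config_fast() above. Units in the two helpers
 * only have to be consistent with each other.
 */
#include <stdio.h>

static const int rates_khz[] = { 162000, 270000, 540000 }; /* RBR/HBR/HBR2 */

/* Data rate the mode needs at a given bpp. */
static int link_required(int pixel_khz, int bpp)
{
	return (pixel_khz * bpp + 7) / 8;
}

/* Payload a link config can carry: 8b/10b leaves 80% for data. */
static int link_available(int rate_khz, int lanes)
{
	return (rate_khz * lanes * 8) / 10;
}

int main(void)
{
	int pixel_khz = 148500;	/* 1080p60 */
	int bpp, lanes, i;

	for (bpp = 30; bpp >= 18; bpp -= 2 * 3) {	/* max bpp first */
		int need = link_required(pixel_khz, bpp);

		for (lanes = 1; lanes <= 4; lanes <<= 1)	/* min lanes */
			for (i = 0; i < 3; i++)		/* min rate last */
				if (need <= link_available(rates_khz[i], lanes)) {
					printf("bpp %d, x%d @ %d kHz\n",
					       bpp, lanes, rates_khz[i]);
					return 0;
				}
	}
	return 1;
}

For 1080p60 this lands on 30 bpp over two lanes at HBR2 rather than a wider four-lane link, which is the power-saving outcome the eDP spec citation in the hunk below is after.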
 static bool
 intel_dp_compute_link_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config)
@@ -1975,13 +1866,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        limits.min_bpp = 6 * 3;
        limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-       if (intel_dp_is_edp(intel_dp)) {
+       if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
                /*
                 * Use the maximum clock and number of lanes the eDP panel
-                * advertizes being capable of. The panels are generally
-                * designed to support only a single clock and lane
-                * configuration, and typically these values correspond to the
-                * native resolution of the panel.
+                * advertises being capable of. The eDP 1.3 and earlier panels
+                * are generally designed to support only a single clock and
+                * lane configuration, and typically these values correspond to
+                * the native resolution of the panel. With eDP 1.4 rate select
+                * and DSC, this is decreasingly the case, and we need to be
+                * able to select less than maximum link config.
                 */
                limits.min_lane_count = limits.max_lane_count;
                limits.min_clock = limits.max_clock;
@@ -1995,12 +1888,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                      intel_dp->common_rates[limits.max_clock],
                      limits.max_bpp, adjusted_mode->crtc_clock);
 
-       /*
-        * Optimize for slow and wide. This is the place to add alternative
-        * optimization policy.
-        */
-       if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
-               return false;
+       if (intel_dp_is_edp(intel_dp)) {
+               /*
+                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+                * section A.1: "It is recommended that the minimum number of
+                * lanes be used, using the minimum link rate allowed for that
+                * lane configuration."
+                *
+                * Note that we use the max clock and lane count for eDP 1.3 and
+                * earlier, and fast vs. wide is irrelevant.
+                */
+               if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+                                                      &limits))
+                       return false;
+       } else {
+               /* Optimize for slow and wide. */
+               if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+                                                      &limits))
+                       return false;
+       }
 
        DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
                      pipe_config->lane_count, pipe_config->port_clock,
@@ -2023,6 +1929,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
        enum port port = encoder->port;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +1941,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
                pipe_config->has_pch_encoder = true;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+       if (lspcon->active)
+               lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
        pipe_config->has_drrs = false;
        if (IS_G4X(dev_priv) || port == PORT_A)
                pipe_config->has_audio = false;
@@ -2338,7 +2249,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;
 
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv,
+                               intel_aux_power_domain(intel_dig_port));
 
        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->base.port));
@@ -2424,7 +2336,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();
 
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_put(dev_priv,
+                               intel_aux_power_domain(intel_dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2450,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
 static void edp_panel_off(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2546,10 +2460,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
                return;
 
        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
-                     port_name(dp_to_dig_port(intel_dp)->base.port));
+                     port_name(dig_port->base.port));
 
        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
-            port_name(dp_to_dig_port(intel_dp)->base.port));
+            port_name(dig_port->base.port));
 
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2482,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        intel_dp->panel_power_off_time = ktime_get_boottime();
 
        /* We got a reference when we enabled the VDD. */
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
 }
 
 void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -3900,6 +3814,41 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
        return intel_dp->dpcd[DP_DPCD_REV] != 0;
 }
 
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+       /*
+        * Clear the cached register set to avoid using stale values
+        * for the sinks that do not support DSC.
+        */
+       memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+       /* Clear fec_capable to avoid using stale values */
+       intel_dp->fec_capable = 0;
+
+       /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+           intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+               if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+                                    intel_dp->dsc_dpcd,
+                                    sizeof(intel_dp->dsc_dpcd)) < 0)
+                       DRM_ERROR("Failed to read DPCD register 0x%x\n",
+                                 DP_DSC_SUPPORT);
+
+               DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+                             (int)sizeof(intel_dp->dsc_dpcd),
+                             intel_dp->dsc_dpcd);
+               /* FEC is supported only on DP 1.4 */
+               if (!intel_dp_is_edp(intel_dp)) {
+                       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+                                             &intel_dp->fec_capable) < 0)
+                               DRM_ERROR("Failed to read FEC DPCD register\n");
+
+               DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
+                             intel_dp->fec_capable);
+               }
+       }
+}
+
 static bool
 intel_edp_init_dpcd(struct intel_dp *intel_dp)
 {
@@ -3976,6 +3925,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
        intel_dp_set_common_rates(intel_dp);
 
+       /* Read the eDP DSC DPCD registers */
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               intel_dp_get_dsc_sink_cap(intel_dp);
+
        return true;
 }
 
@@ -4029,16 +3982,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+intel_dp_sink_can_mst(struct intel_dp *intel_dp)
 {
        u8 mstm_cap;
 
-       if (!i915_modparams.enable_dp_mst)
-               return false;
-
-       if (!intel_dp->can_mst)
-               return false;
-
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
                return false;
 
@@ -4048,33 +3995,35 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
        return mstm_cap & DP_MST_CAP;
 }
 
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+       return i915_modparams.enable_dp_mst &&
+               intel_dp->can_mst &&
+               intel_dp_sink_can_mst(intel_dp);
+}
+
 static void
 intel_dp_configure_mst(struct intel_dp *intel_dp)
 {
-       if (!i915_modparams.enable_dp_mst)
-               return;
+       struct intel_encoder *encoder =
+               &dp_to_dig_port(intel_dp)->base;
+       bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+       DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+                     port_name(encoder->port), yesno(intel_dp->can_mst),
+                     yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
 
        if (!intel_dp->can_mst)
                return;
 
-       intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
-       if (intel_dp->is_mst)
-               DRM_DEBUG_KMS("Sink is MST capable\n");
-       else
-               DRM_DEBUG_KMS("Sink is not MST capable\n");
+       intel_dp->is_mst = sink_can_mst &&
+               i915_modparams.enable_dp_mst;
 
        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                        intel_dp->is_mst);
 }
 
-static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
-       return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
-                                sink_irq_vector) == 1;
-}
-
 static bool
 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
@@ -4083,6 +4032,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
                DP_DPRX_ESI_LEN;
 }
 
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+                               int mode_clock, int mode_hdisplay)
+{
+       u16 bits_per_pixel, max_bpp_small_joiner_ram;
+       int i;
+
+       /*
+        * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
+        * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP
+        * FECOverhead = 2.4%; for SST, TimeSlotsPerMTP is 1;
+        * for MST, TimeSlotsPerMTP has to be calculated.
+        */
+       bits_per_pixel = (link_clock * lane_count * 8 *
+                         DP_DSC_FEC_OVERHEAD_FACTOR) /
+               mode_clock;
+
+       /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+       max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+               mode_hdisplay;
+
+       /*
+        * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+        * check, output bpp from small joiner RAM check)
+        */
+       bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+       /* Error out if the max bpp is less than smallest allowed valid bpp */
+       if (bits_per_pixel < valid_dsc_bpp[0]) {
+               DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+               return 0;
+       }
+
+       /* Find the nearest match in the array of known BPPs from VESA */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+               if (bits_per_pixel < valid_dsc_bpp[i + 1])
+                       break;
+       }
+       bits_per_pixel = valid_dsc_bpp[i];
+
+       /*
+        * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+        * fractional part is 0
+        */
+       return bits_per_pixel << 4;
+}
+
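To make the two clamps concrete with round numbers (only the link and mode figures are asserted; the FEC factor, joiner RAM size and VESA bpp table are whatever this series defines): 540000 kHz x 4 lanes x 8 is 17,280,000, which over a 533,250 kHz 4K60 pixel clock is roughly 32 bpp before the FEC derating; a 3840-wide mode then also caps bpp at joiner RAM / 3840, the smaller of the two is snapped down to the nearest VESA table entry, and the result comes back shifted left by 4. A trivial standalone illustration of that U6.4 packing:

/*
 * The low four bits of the returned value are the fraction, which is 0
 * on Gen 11. Plain C illustration, not driver code.
 */
#include <stdio.h>

int main(void)
{
	unsigned short bpp_x16 = 12 << 4;	/* 12.0 bpp in U6.4 */

	printf("integer bpp : %u\n", bpp_x16 >> 4);	/* 12 */
	printf("fraction/16 : %u\n", bpp_x16 & 0xf);	/* 0 */
	return 0;
}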
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+                               int mode_clock,
+                               int mode_hdisplay)
+{
+       u8 min_slice_count, i;
+       int max_slice_width;
+
+       if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_0);
+       else
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+       max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+       if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+                             max_slice_width);
+               return 0;
+       }
+       /* Also take into account max slice width */
+       min_slice_count = min_t(uint8_t, min_slice_count,
+                               DIV_ROUND_UP(mode_hdisplay,
+                                            max_slice_width));
+
+       /* Find the closest match to the valid slice count values */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+               if (valid_dsc_slicecount[i] >
+                   drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+                                                   false))
+                       break;
+               if (min_slice_count <= valid_dsc_slicecount[i])
+                       return valid_dsc_slicecount[i];
+       }
+
+       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+       return 0;
+}
+
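A quick sanity check with illustrative limits (the actual throughput and slice-width values come from the DPCD helpers, nothing is asserted here): a ~533,250 kHz 4K60 mode against a 340,000 kHz per-engine throughput needs DIV_ROUND_UP(533250, 340000) = 2 slices, a 3840-pixel-wide frame against a 2560-pixel max slice width likewise gives DIV_ROUND_UP(3840, 2560) = 2, and the final loop then returns the smallest sink-supported entry of valid_dsc_slicecount that is >= 2.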
 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 {
        int status = 0;
@@ -4403,7 +4437,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 
        /* Suppress underruns caused by re-training */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-       if (crtc->config->has_pch_encoder)
+       if (crtc_state->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                      intel_crtc_pch_transcoder(crtc), false);
 
@@ -4414,7 +4448,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
        intel_wait_for_vblank(dev_priv, crtc->pipe);
 
        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-       if (crtc->config->has_pch_encoder)
+       if (crtc_state->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                      intel_crtc_pch_transcoder(crtc), true);
 
@@ -4462,6 +4496,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
        return changed;
 }
 
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+       u8 val;
+
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+               return;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux,
+                             DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+               return;
+
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+       if (val & DP_AUTOMATED_TEST_REQUEST)
+               intel_dp_handle_test_request(intel_dp);
+
+       if (val & DP_CP_IRQ)
+               intel_hdcp_check_link(intel_dp->attached_connector);
+
+       if (val & DP_SINK_SPECIFIC_IRQ)
+               DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -4479,7 +4536,6 @@ static bool
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u8 sink_irq_vector = 0;
        u8 old_sink_count = intel_dp->sink_count;
        bool ret;
 
@@ -4502,20 +4558,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
                return false;
        }
 
-       /* Try to read the source of the interrupt */
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-           sink_irq_vector != 0) {
-               /* Clear interrupt source */
-               drm_dp_dpcd_writeb(&intel_dp->aux,
-                                  DP_DEVICE_SERVICE_IRQ_VECTOR,
-                                  sink_irq_vector);
-
-               if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-                       intel_dp_handle_test_request(intel_dp);
-               if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-                       DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-       }
+       intel_dp_check_service_irq(intel_dp);
 
        /* Handle CEC interrupts, if any */
        drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +4853,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
                              type_str);
 }
 
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+                                 struct intel_digital_port *dig_port);
+
 /*
  * This function implements the first part of the Connect Flow described by our
  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +4910,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
        if (dig_port->tc_type == TC_PORT_TYPEC &&
            !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
                DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
-               val = I915_READ(PORT_TX_DFLEXDPCSSS);
-               val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
-               I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+               icl_tc_phy_disconnect(dev_priv, dig_port);
                return false;
        }
 
@@ -4881,21 +4925,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
                                  struct intel_digital_port *dig_port)
 {
        enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-       u32 val;
 
-       if (dig_port->tc_type != TC_PORT_LEGACY &&
-           dig_port->tc_type != TC_PORT_TYPEC)
+       if (dig_port->tc_type == TC_PORT_UNKNOWN)
                return;
 
        /*
-        * This function may be called many times in a row without an HPD event
-        * in between, so try to avoid the write when we can.
+        * The TBT disconnection flow is just reading the live status,
+        * which was already done by the caller.
         */
-       val = I915_READ(PORT_TX_DFLEXDPCSSS);
-       if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+       if (dig_port->tc_type == TC_PORT_TYPEC ||
+           dig_port->tc_type == TC_PORT_LEGACY) {
+               u32 val;
+
+               val = I915_READ(PORT_TX_DFLEXDPCSSS);
                val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
                I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
        }
+
+       dig_port->tc_type = TC_PORT_UNKNOWN;
 }
 
 /*
@@ -4945,19 +4992,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
-       switch (encoder->hpd_pin) {
-       case HPD_PORT_A:
-       case HPD_PORT_B:
+       if (intel_port_is_combophy(dev_priv, encoder->port))
                return icl_combo_port_connected(dev_priv, dig_port);
-       case HPD_PORT_C:
-       case HPD_PORT_D:
-       case HPD_PORT_E:
-       case HPD_PORT_F:
+       else if (intel_port_is_tc(dev_priv, encoder->port))
                return icl_tc_port_connected(dev_priv, dig_port);
-       default:
+       else
                MISSING_CASE(encoder->hpd_pin);
-               return false;
-       }
+
+       return false;
 }
 
 /*
@@ -4982,20 +5024,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
                        return g4x_digital_port_connected(encoder);
        }
 
-       if (IS_GEN5(dev_priv))
-               return ilk_digital_port_connected(encoder);
-       else if (IS_GEN6(dev_priv))
-               return snb_digital_port_connected(encoder);
-       else if (IS_GEN7(dev_priv))
-               return ivb_digital_port_connected(encoder);
-       else if (IS_GEN8(dev_priv))
-               return bdw_digital_port_connected(encoder);
+       if (INTEL_GEN(dev_priv) >= 11)
+               return icl_digital_port_connected(encoder);
+       else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+               return spt_digital_port_connected(encoder);
        else if (IS_GEN9_LP(dev_priv))
                return bxt_digital_port_connected(encoder);
-       else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
-               return spt_digital_port_connected(encoder);
-       else
-               return icl_digital_port_connected(encoder);
+       else if (IS_GEN8(dev_priv))
+               return bdw_digital_port_connected(encoder);
+       else if (IS_GEN7(dev_priv))
+               return ivb_digital_port_connected(encoder);
+       else if (IS_GEN6(dev_priv))
+               return snb_digital_port_connected(encoder);
+       else if (IS_GEN5(dev_priv))
+               return ilk_digital_port_connected(encoder);
+
+       MISSING_CASE(INTEL_GEN(dev_priv));
+       return false;
 }
 
 static struct edid *
@@ -5042,28 +5087,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector,
-                   struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+               struct drm_modeset_acquire_ctx *ctx,
+               bool force)
 {
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &dig_port->base;
        enum drm_connector_status status;
-       u8 sink_irq_vector = 0;
+       enum intel_display_power_domain aux_domain =
+               intel_aux_power_domain(dig_port);
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, connector->name);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv, aux_domain);
 
        /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
-       else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+       else if (intel_digital_port_connected(encoder))
                status = intel_dp_detect_dpcd(intel_dp);
        else
                status = connector_status_disconnected;
 
        if (status == connector_status_disconnected) {
                memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+               memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
 
                if (intel_dp->is_mst) {
                        DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5141,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
 
        intel_dp_print_rates(intel_dp);
 
+       /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+       if (INTEL_GEN(dev_priv) >= 11)
+               intel_dp_get_dsc_sink_cap(intel_dp);
+
        drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
                         drm_dp_is_branch(intel_dp->dpcd));
 
@@ -5109,9 +5165,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
         * with an IRQ_HPD, so force a link status check.
         */
        if (!intel_dp_is_edp(intel_dp)) {
-               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+               int ret;
 
-               intel_dp_retrain_link(encoder, ctx);
+               ret = intel_dp_retrain_link(encoder, ctx);
+               if (ret) {
+                       intel_display_power_put(dev_priv, aux_domain);
+                       return ret;
+               }
        }
 
        /*
@@ -5123,61 +5183,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
        intel_dp->aux.i2c_defer_count = 0;
 
        intel_dp_set_edid(intel_dp);
-       if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+       if (intel_dp_is_edp(intel_dp) ||
+           to_intel_connector(connector)->detect_edid)
                status = connector_status_connected;
-       intel_dp->detect_done = true;
 
-       /* Try to read the source of the interrupt */
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-           sink_irq_vector != 0) {
-               /* Clear interrupt source */
-               drm_dp_dpcd_writeb(&intel_dp->aux,
-                                  DP_DEVICE_SERVICE_IRQ_VECTOR,
-                                  sink_irq_vector);
-
-               if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-                       intel_dp_handle_test_request(intel_dp);
-               if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-                       DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-       }
+       intel_dp_check_service_irq(intel_dp);
 
 out:
        if (status != connector_status_connected && !intel_dp->is_mst)
                intel_dp_unset_edid(intel_dp);
 
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-       return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
-               struct drm_modeset_acquire_ctx *ctx,
-               bool force)
-{
-       struct intel_dp *intel_dp = intel_attached_dp(connector);
-       int status = connector->status;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
-
-       /* If full detect is not performed yet, do a full detect */
-       if (!intel_dp->detect_done) {
-               struct drm_crtc *crtc;
-               int ret;
-
-               crtc = connector->state->crtc;
-               if (crtc) {
-                       ret = drm_modeset_lock(&crtc->mutex, ctx);
-                       if (ret)
-                               return ret;
-               }
-
-               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
-       }
-
-       intel_dp->detect_done = false;
-
+       intel_display_power_put(dev_priv, aux_domain);
        return status;
 }
 
@@ -5185,8 +5201,11 @@ static void
 intel_dp_force(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+       enum intel_display_power_domain aux_domain =
+               intel_aux_power_domain(dig_port);
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
@@ -5195,11 +5214,11 @@ intel_dp_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv, aux_domain);
 
        intel_dp_set_edid(intel_dp);
 
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_put(dev_priv, aux_domain);
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5283,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
        intel_connector_unregister(connector);
 }
 
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       kfree(intel_connector->detect_edid);
-
-       if (!IS_ERR_OR_NULL(intel_connector->edid))
-               kfree(intel_connector->edid);
-
-       /*
-        * Can't call intel_dp_is_edp() since the encoder may have been
-        * destroyed already.
-        */
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-               intel_panel_fini(&intel_connector->panel);
-
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5346,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
                                     an, DRM_HDCP_AN_LEN);
        if (dpcd_ret != DRM_HDCP_AN_LEN) {
-               DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+               DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+                             dpcd_ret);
                return dpcd_ret >= 0 ? -EIO : dpcd_ret;
        }
 
@@ -5364,10 +5363,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
                                rxbuf, sizeof(rxbuf),
                                DP_AUX_CH_CTL_AUX_AKSV_SELECT);
        if (ret < 0) {
-               DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
                return ret;
        } else if (ret == 0) {
-               DRM_ERROR("Aksv write over DP/AUX was empty\n");
+               DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
                return -EIO;
        }
 
@@ -5382,7 +5381,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
                               DRM_HDCP_KSV_LEN);
        if (ret != DRM_HDCP_KSV_LEN) {
-               DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -5400,7 +5399,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
                               bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret != DRM_HDCP_BSTATUS_LEN) {
-               DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -5415,7 +5414,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
                               bcaps, 1);
        if (ret != 1) {
-               DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
 
@@ -5445,7 +5444,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
                               ri_prime, DRM_HDCP_RI_LEN);
        if (ret != DRM_HDCP_RI_LEN) {
-               DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -5460,7 +5459,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
-               DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        *ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5481,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                       ksv_fifo + i * DRM_HDCP_KSV_LEN,
                                       len);
                if (ret != len) {
-                       DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
-                                 ret);
+                       DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+                                     i, ret);
                        return ret >= 0 ? -EIO : ret;
                }
        }
@@ -5503,7 +5502,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
                               DP_AUX_HDCP_V_PRIME(i), part,
                               DRM_HDCP_V_PRIME_PART_LEN);
        if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
-               DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+               DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -5526,7 +5525,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
-               DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
                return false;
        }
 
@@ -5565,6 +5564,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5578,7 +5578,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
 
        edp_panel_vdd_schedule_off(intel_dp);
 }
@@ -5631,7 +5631,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .atomic_set_property = intel_digital_connector_atomic_set_property,
        .late_register = intel_dp_connector_register,
        .early_unregister = intel_dp_connector_unregister,
-       .destroy = intel_dp_connector_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -5673,11 +5673,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
        if (long_hpd) {
                intel_dp->reset_link_params = true;
-               intel_dp->detect_done = false;
                return IRQ_NONE;
        }
 
-       intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_get(dev_priv,
+                               intel_aux_power_domain(intel_dig_port));
 
        if (intel_dp->is_mst) {
                if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5690,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
-                       intel_dp->detect_done = false;
                        goto put_power;
                }
        }
@@ -5700,19 +5699,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
                handled = intel_dp_short_pulse(intel_dp);
 
-               /* Short pulse can signify loss of hdcp authentication */
-               intel_hdcp_check_link(intel_dp->attached_connector);
-
-               if (!handled) {
-                       intel_dp->detect_done = false;
+               if (!handled)
                        goto put_power;
-               }
        }
 
        ret = IRQ_HANDLED;
 
 put_power:
-       intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+       intel_display_power_put(dev_priv,
+                               intel_aux_power_domain(intel_dig_port));
 
        return ret;
 }
@@ -5743,6 +5738,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
                intel_attach_force_audio_property(connector);
 
        intel_attach_broadcast_rgb_property(connector);
+       if (HAS_GMCH_DISPLAY(dev_priv))
+               drm_connector_attach_max_bpc_property(connector, 6, 10);
+       else if (INTEL_GEN(dev_priv) >= 5)
+               drm_connector_attach_max_bpc_property(connector, 6, 12);
 
        if (intel_dp_is_edp(intel_dp)) {
                u32 allowed_scalers;
@@ -6099,10 +6098,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
        if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
                switch (index) {
                case DRRS_HIGH_RR:
-                       intel_dp_set_m_n(intel_crtc, M1_N1);
+                       intel_dp_set_m_n(crtc_state, M1_N1);
                        break;
                case DRRS_LOW_RR:
-                       intel_dp_set_m_n(intel_crtc, M2_N2);
+                       intel_dp_set_m_n(crtc_state, M2_N2);
                        break;
                case DRRS_MAX_RR:
                default:
@@ -6422,6 +6421,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!intel_dp_is_edp(intel_dp))
                return true;
 
+       INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
@@ -6514,6 +6515,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);
 
+       if (fixed_mode)
+               drm_connector_init_panel_orientation_property(
+                       connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
        return true;
 
 out_vdd_off:
@@ -6624,9 +6629,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
        intel_dp_aux_init(intel_dp);
 
-       INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-                         edp_panel_vdd_work);
-
        intel_connector_attach_encoder(intel_connector, intel_encoder);
 
        if (HAS_DDI(dev_priv))
@@ -6743,6 +6745,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);
 
+       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;
 
index 1b00f8ea145ba3990d17f6e7142755bae8ca6a77..4de247ddf05f80a89fc4d84aeaf93112c4d468f2 100644 (file)
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->has_pch_encoder = false;
        bpp = 24;
        if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-       if (intel_dp->active_mst_links == 0 &&
-           intel_dig_port->base.pre_pll_enable)
+       if (intel_dp->active_mst_links == 0)
                intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
                                                    pipe_config, NULL);
 }
 
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+                                         const struct intel_crtc_state *old_crtc_state,
+                                         const struct drm_connector_state *old_conn_state)
+{
+       struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+       struct intel_digital_port *intel_dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+       if (intel_dp->active_mst_links == 0)
+               intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+                                                     old_crtc_state,
+                                                     old_conn_state);
+}
+
 static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *pipe_config,
                                    const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
                                      intel_connector->port);
 }
 
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       if (!IS_ERR_OR_NULL(intel_connector->edid))
-               kfree(intel_connector->edid);
-
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
        .detect = intel_dp_mst_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_dp_mst_connector_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
@@ -452,6 +454,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        if (!intel_connector)
                return NULL;
 
+       intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+       intel_connector->mst_port = intel_dp;
+       intel_connector->port = port;
+
        connector = &intel_connector->base;
        ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
                                 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +468,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
        drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
-       intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
-       intel_connector->mst_port = intel_dp;
-       intel_connector->port = port;
-
        for_each_pipe(dev_priv, pipe) {
                struct drm_encoder *enc =
                        &intel_dp->mst_encoders[pipe]->base.base;
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
        intel_encoder->disable = intel_mst_disable_dp;
        intel_encoder->post_disable = intel_mst_post_disable_dp;
        intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+       intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
        intel_encoder->pre_enable = intel_mst_pre_enable_dp;
        intel_encoder->enable = intel_mst_enable_dp;
        intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
index 00b3ab656b06d0f2d49bb87922d78813299f9752..3c7f10d1765824ba32d0899b6e50e84d488518e8 100644 (file)
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
-       if (crtc->config->lane_count > 2) {
+       if (crtc_state->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
                if (reset)
                        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
-       if (crtc->config->lane_count > 2) {
+       if (crtc_state->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
                val |= CHV_PCS_REQ_SOFTRESET_EN;
                if (reset)
index e6cac9225536a6ce39d44d6f898e6577b042dba0..901e15063b24e508eeec6155673e7ecd2ba2e464 100644 (file)
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 
 /**
  * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: CRTC, and its state, which has a shared dpll
  *
  * This calls the PLL's prepare hook if it has one and if the PLL is not
  * already enabled. The prepare hook is platform specific.
  */
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
        if (WARN_ON(pll == NULL))
                return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 
 /**
  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
  *
  * Enable the shared DPLL used by @crtc.
  */
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
        unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
        unsigned int old_mask;
 
@@ -199,14 +199,15 @@ out:
 
 /**
  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
  *
  * Disable the shared DPLL used by @crtc.
  */
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
        unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 
        /* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
        const enum intel_dpll_id id = pll->info->id;
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_crtc *crtc;
-
-       /* Make sure no transcoder isn't still depending on us. */
-       for_each_intel_crtc(dev, crtc) {
-               if (crtc->config->shared_dpll == pll)
-                       assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
-       }
 
        I915_WRITE(PCH_DPLL(id), 0);
        POSTING_READ(PCH_DPLL(id));
@@ -2628,11 +2621,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
        return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
 }
 
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
 {
        return port - PORT_C + DPLL_ID_ICL_MGPLL1;
 }
 
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+       return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
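The two helpers above are inverses on the MG range: icl_port_to_mg_pll_id() maps PORT_C to DPLL_ID_ICL_MGPLL1, PORT_D to DPLL_ID_ICL_MGPLL2, and so on, mirroring icl_mg_pll_id_to_port() earlier in the file.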
 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
                                     uint32_t *target_dco_khz,
                                     struct intel_dpll_hw_state *state)
@@ -2874,8 +2872,8 @@ static struct intel_shared_dpll *
 icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
             struct intel_encoder *encoder)
 {
-       struct intel_digital_port *intel_dig_port =
-                       enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *intel_dig_port;
        struct intel_shared_dpll *pll;
        struct intel_dpll_hw_state pll_state = {};
        enum port port = encoder->port;
@@ -2883,18 +2881,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        int clock = crtc_state->port_clock;
        bool ret;
 
-       switch (port) {
-       case PORT_A:
-       case PORT_B:
+       if (intel_port_is_combophy(dev_priv, port)) {
                min = DPLL_ID_ICL_DPLL0;
                max = DPLL_ID_ICL_DPLL1;
                ret = icl_calc_dpll_state(crtc_state, encoder, clock,
                                          &pll_state);
-               break;
-       case PORT_C:
-       case PORT_D:
-       case PORT_E:
-       case PORT_F:
+       } else if (intel_port_is_tc(dev_priv, port)) {
+               if (encoder->type == INTEL_OUTPUT_DP_MST) {
+                       struct intel_dp_mst_encoder *mst_encoder;
+
+                       mst_encoder = enc_to_mst(&encoder->base);
+                       intel_dig_port = mst_encoder->primary;
+               } else {
+                       intel_dig_port = enc_to_dig_port(&encoder->base);
+               }
+
                if (intel_dig_port->tc_type == TC_PORT_TBT) {
                        min = DPLL_ID_ICL_TBTPLL;
                        max = min;
@@ -2906,8 +2907,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                        ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
                                                    &pll_state);
                }
-               break;
-       default:
+       } else {
                MISSING_CASE(port);
                return NULL;
        }
@@ -2932,21 +2932,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
 static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
 {
-       switch (id) {
-       default:
-               MISSING_CASE(id);
-               /* fall through */
-       case DPLL_ID_ICL_DPLL0:
-       case DPLL_ID_ICL_DPLL1:
+       if (intel_dpll_is_combophy(id))
                return CNL_DPLL_ENABLE(id);
-       case DPLL_ID_ICL_TBTPLL:
+       else if (id == DPLL_ID_ICL_TBTPLL)
                return TBT_PLL_ENABLE;
-       case DPLL_ID_ICL_MGPLL1:
-       case DPLL_ID_ICL_MGPLL2:
-       case DPLL_ID_ICL_MGPLL3:
-       case DPLL_ID_ICL_MGPLL4:
+       else
+               /*
+                * TODO: Make MG_PLL macros use
+                * tc port id instead of port id
+                */
                return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
-       }
 }
 
 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2960,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
        if (!(val & PLL_ENABLE))
                goto out;
 
-       switch (id) {
-       case DPLL_ID_ICL_DPLL0:
-       case DPLL_ID_ICL_DPLL1:
-       case DPLL_ID_ICL_TBTPLL:
+       if (intel_dpll_is_combophy(id) ||
+           id == DPLL_ID_ICL_TBTPLL) {
                hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
                hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
-               break;
-       case DPLL_ID_ICL_MGPLL1:
-       case DPLL_ID_ICL_MGPLL2:
-       case DPLL_ID_ICL_MGPLL3:
-       case DPLL_ID_ICL_MGPLL4:
+       } else {
                port = icl_mg_pll_id_to_port(id);
                hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
                hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3002,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
                hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
                hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
-               break;
-       default:
-               MISSING_CASE(id);
        }
 
        ret = true;
@@ -3104,21 +3090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
                                    PLL_POWER_STATE, 1))
                DRM_ERROR("PLL %d Power not enabled\n", id);
 
-       switch (id) {
-       case DPLL_ID_ICL_DPLL0:
-       case DPLL_ID_ICL_DPLL1:
-       case DPLL_ID_ICL_TBTPLL:
+       if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
                icl_dpll_write(dev_priv, pll);
-               break;
-       case DPLL_ID_ICL_MGPLL1:
-       case DPLL_ID_ICL_MGPLL2:
-       case DPLL_ID_ICL_MGPLL3:
-       case DPLL_ID_ICL_MGPLL4:
+       else
                icl_mg_pll_write(dev_priv, pll);
-               break;
-       default:
-               MISSING_CASE(id);
-       }
 
        /*
         * DVFS pre sequence would be here, but in our driver the cdclk code
index bf0de8a4dc6378c9bd6de5432449ec1afac10bd1..a033d8f06d4a80f726b13287067a6953b326f14d 100644 (file)
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
                               struct intel_crtc *crtc,
                               struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
                               uint32_t pll_id);
 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index f8dc84b2d2d3443dcd47232f013bddb4ae9731f8..a7d9ac9121259685df03273726a1ad4e70e376ca 100644 (file)
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
                            bool *hdcp_capable);
 };
 
+struct intel_hdcp {
+       const struct intel_hdcp_shim *shim;
+       /* Mutex for hdcp state of the connector */
+       struct mutex mutex;
+       u64 value;
+       struct delayed_work check_work;
+       struct work_struct prop_work;
+};
+
 struct intel_connector {
        struct drm_connector base;
        /*
@@ -413,11 +422,7 @@ struct intel_connector {
        /* Work struct to schedule a uevent on link train failure */
        struct work_struct modeset_retry_work;
 
-       const struct intel_hdcp_shim *hdcp_shim;
-       struct mutex hdcp_mutex;
-       uint64_t hdcp_value; /* protected by hdcp_mutex */
-       struct delayed_work hdcp_check_work;
-       struct work_struct hdcp_prop_work;
+       struct intel_hdcp hdcp;
 };
 
 struct intel_digital_connector_state {
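For illustration, the embedded struct turns the per-field accesses that the hunk above removes into accesses through connector->hdcp; a hedged before/after sketch (the call site itself is an assumption, not quoted from the patch):

        /* before */
        mutex_lock(&connector->hdcp_mutex);
        connector->hdcp_value = value;
        schedule_work(&connector->hdcp_prop_work);
        mutex_unlock(&connector->hdcp_mutex);

        /* after */
        mutex_lock(&connector->hdcp.mutex);
        connector->hdcp.value = value;
        schedule_work(&connector->hdcp.prop_work);
        mutex_unlock(&connector->hdcp.mutex);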
@@ -539,6 +544,26 @@ struct intel_plane_state {
         */
        int scaler_id;
 
+       /*
+        * linked_plane:
+        *
+        * ICL planar formats require 2 planes that are updated in pairs.
+        * This member is used to make sure the other plane is also updated
+        * when required, and for update_slave() to find the correct
+        * plane_state to pass as its argument.
+        */
+       struct intel_plane *linked_plane;
+
+       /*
+        * slave:
+        * If set, don't update this plane directly; the linked plane's
+        * state is used to update it during atomic commit via update_slave().
+        *
+        * It's also used by the watermark code to ignore wm calculations on
+        * this plane. They're calculated by the linked plane's wm code.
+        */
+       u32 slave;
+
        struct drm_intel_sprite_colorkey ckey;
 };
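A hedged sketch of how the per-plane commit loop can consume these two members (loop context and exact call sites are assumptions, not the driver's code): slave planes are skipped by the normal path and programmed through the slave's update_slave() hook with the master's plane state.

        if (plane_state->slave)
                continue; /* programmed via the master's pass below */

        plane->update_plane(plane, crtc_state, plane_state);

        if (plane_state->linked_plane) {
                struct intel_plane *slave = plane_state->linked_plane;

                /* Hand the slave the master's state, per the comment above. */
                slave->update_slave(slave, crtc_state, plane_state);
        }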
 
@@ -547,6 +572,7 @@ struct intel_initial_plane_config {
        unsigned int tiling;
        int size;
        u32 base;
+       u8 rotation;
 };
 
 #define SKL_MIN_SRC_W 8
@@ -712,6 +738,13 @@ struct intel_crtc_wm_state {
        bool need_postvbl_update;
 };
 
+enum intel_output_format {
+       INTEL_OUTPUT_FORMAT_INVALID,
+       INTEL_OUTPUT_FORMAT_RGB,
+       INTEL_OUTPUT_FORMAT_YCBCR420,
+       INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
 struct intel_crtc_state {
        struct drm_crtc_state base;
 
@@ -899,8 +932,11 @@ struct intel_crtc_state {
        /* HDMI High TMDS char rate ratio */
        bool hdmi_high_tmds_clock_ratio;
 
-       /* output format is YCBCR 4:2:0 */
-       bool ycbcr420;
+       /* Output format: RGB/YCbCr etc. */
+       enum intel_output_format output_format;
+
+       /* Output downscaling is done in the LSPCON device */
+       bool lspcon_downsampling;
 };
 
 struct intel_crtc {
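The bool-to-enum conversion above shows up at call sites roughly as follows (a sketch; PIPEMISC_YUV420_ENABLE stands in for whatever bit the real code programs):

        /* before */
        if (crtc_state->ycbcr420)
                val |= PIPEMISC_YUV420_ENABLE;

        /* after */
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                val |= PIPEMISC_YUV420_ENABLE;

The enum also gives YCbCr 4:4:4 a representation, which the single bool could not express.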
@@ -973,6 +1009,9 @@ struct intel_plane {
        void (*update_plane)(struct intel_plane *plane,
                             const struct intel_crtc_state *crtc_state,
                             const struct intel_plane_state *plane_state);
+       void (*update_slave)(struct intel_plane *plane,
+                            const struct intel_crtc_state *crtc_state,
+                            const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
        bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1070,13 +1109,13 @@ struct intel_dp {
        bool link_mst;
        bool link_trained;
        bool has_audio;
-       bool detect_done;
        bool reset_link_params;
-       enum aux_ch aux_ch;
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
        uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
        uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+       u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+       u8 fec_capable;
        /* source rates */
        int num_source_rates;
        const int *source_rates;
@@ -1094,7 +1133,6 @@ struct intel_dp {
        /* sink or branch descriptor */
        struct drm_dp_desc desc;
        struct drm_dp_aux aux;
-       enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
        int panel_power_up_delay;
        int panel_power_down_delay;
@@ -1156,9 +1194,15 @@ struct intel_dp {
        struct intel_dp_compliance compliance;
 };
 
+enum lspcon_vendor {
+       LSPCON_VENDOR_MCA,
+       LSPCON_VENDOR_PARADE
+};
+
 struct intel_lspcon {
        bool active;
        enum drm_lspcon_mode mode;
+       enum lspcon_vendor vendor;
 };
 
 struct intel_digital_port {
@@ -1170,18 +1214,20 @@ struct intel_digital_port {
        enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
        bool release_cl2_override;
        uint8_t max_lanes;
+       /* Used for DP ports, and for ICL+ TypeC/DP and TypeC/HDMI ports. */
+       enum aux_ch aux_ch;
        enum intel_display_power_domain ddi_io_power_domain;
        enum tc_port_type tc_type;
 
-       void (*write_infoframe)(struct drm_encoder *encoder,
+       void (*write_infoframe)(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len);
-       void (*set_infoframes)(struct drm_encoder *encoder,
+       void (*set_infoframes)(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state);
-       bool (*infoframe_enabled)(struct drm_encoder *encoder,
+       bool (*infoframe_enabled)(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config);
 };
 
@@ -1281,6 +1327,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
                return NULL;
 }
 
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+       return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
 static inline struct intel_dp_mst_encoder *
 enc_to_mst(struct drm_encoder *encoder)
 {
@@ -1306,6 +1358,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
        }
 }
 
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+       return &enc_to_dig_port(encoder)->lspcon;
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {
@@ -1330,6 +1388,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+                                struct intel_plane *plane)
+{
+       struct drm_plane_state *ret =
+               drm_atomic_get_plane_state(&state->base, &plane->base);
+
+       if (IS_ERR(ret))
+               return ERR_CAST(ret);
+
+       return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+                                struct intel_plane *plane)
+{
+       return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+                                                                  &plane->base));
+}
+
 static inline struct intel_plane_state *
 intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
                                 struct intel_plane *plane)
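Typical usage of the new wrapper in a check path, sketched; it keeps the IS_ERR convention of the underlying drm_atomic_get_plane_state():

        struct intel_plane_state *plane_state =
                intel_atomic_get_plane_state(state, plane);

        if (IS_ERR(plane_state))
                return PTR_ERR(plane_state);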
@@ -1444,6 +1523,7 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
 void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
                             struct intel_crtc_state *crtc_state,
                             struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
                                   int color_plane, unsigned int height);
@@ -1488,7 +1568,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq);
@@ -1509,20 +1588,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
 int intel_display_suspend(struct drm_device *dev);
 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
-                                   struct intel_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
                              enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1628,9 +1699,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
                      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+                     enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
                        struct dpll *best_clock);
@@ -1641,12 +1714,14 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
 
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(const struct intel_crtc_state *crtc_state,
                  u32 pixel_format);
@@ -1670,6 +1745,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
                                   u32 pixel_format, u64 modifier,
                                   unsigned int rotation);
 
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+                                struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
 void intel_csr_load_program(struct drm_i915_private *);
@@ -1728,9 +1821,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1748,6 +1838,10 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+                                    int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+                                    int mode_hdisplay);
 
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -1768,6 +1862,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* vlv_dsi.c */
 void vlv_dsi_init(struct drm_i915_private *dev_priv);
 
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
 /* intel_dsi_dcs_backlight.c */
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
@@ -1858,7 +1955,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
 
-
 /* intel_lvds.c */
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
                             i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1866,19 +1962,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
-                                struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
 int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
@@ -1907,7 +1993,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
 void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
 extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_i915_private *dev_priv,
                                struct drm_display_mode *fixed_mode,
@@ -1936,6 +2021,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
 int intel_hdcp_disable(struct intel_connector *connector);
 int intel_hdcp_check_link(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1962,11 +2048,16 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
                            u32 *out_value);
 
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
+
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_power_domains_enable(struct drm_i915_private *dev_priv);
 void intel_power_domains_disable(struct drm_i915_private *dev_priv);
 
@@ -2101,10 +2192,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
                         const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-                                const struct skl_ddb_entry **entries,
-                                const struct skl_ddb_entry *ddb,
-                                int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+                                const struct skl_ddb_entry entries[],
+                                int num_entries, int ignore_idx);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
                                  struct intel_crtc_state *cstate);
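The reworked declaration suggests an implementation shaped roughly like this, sketched on the assumption that a pairwise skl_ddb_entries_overlap() test exists in intel_pm.c:

        bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
                                         const struct skl_ddb_entry entries[],
                                         int num_entries, int ignore_idx)
        {
                int i;

                for (i = 0; i < num_entries; i++) {
                        if (i != ignore_idx &&
                            skl_ddb_entries_overlap(ddb, &entries[i]))
                                return true;
                }

                return false;
        }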
@@ -2127,23 +2217,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
-                     const struct intel_crtc_state *crtc_state,
-                     const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-                      enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
-                                 u32 pixel_format, u64 modifier,
-                                 unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-                   struct intel_plane_state *plane_state);
 int intel_plane_check_stride(const struct intel_plane_state *plane_state);
 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
 int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+                          enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+       /* No need for a gen check; these planes only exist on gen11 */
+       if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+               return true;
+
+       return false;
+}
+
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+       if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+               return false;
+
+       return plane->id < PLANE_SPRITE2;
+}
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2185,11 +2281,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
                               struct intel_crtc_state *crtc_state);
 
 /* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
 struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
 void intel_plane_destroy_state(struct drm_plane *plane,
                               struct drm_plane_state *state);
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
+                                struct intel_crtc *crtc,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct intel_crtc_state *new_crtc_state);
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
                                        struct intel_crtc_state *crtc_state,
                                        const struct intel_plane_state *old_plane_state,
@@ -2205,6 +2306,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state,
+                           unsigned int type,
+                           const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+                          bool enable,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+                           struct intel_crtc_state *crtc_state);
 
 /* intel_pipe_crc.c */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644 (file)
index 0000000..5fec02a
--- /dev/null
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+       int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+       if (WARN_ON(bpp < 0))
+               bpp = 16;
+
+       return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+       switch (intel_dsi->escape_clk_div) {
+       default:
+       case 0:
+               return 50;
+       case 1:
+               return 100;
+       case 2:
+               return 200;
+       }
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_display_mode *mode;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (!intel_connector->panel.fixed_mode) {
+               DRM_DEBUG_KMS("no fixed mode\n");
+               return 0;
+       }
+
+       mode = drm_mode_duplicate(connector->dev,
+                                 intel_connector->panel.fixed_mode);
+       if (!mode) {
+               DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+               return 0;
+       }
+
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+                                         struct drm_display_mode *mode)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+               if (fixed_mode->clock > max_dotclk)
+                       return MODE_CLOCK_HIGH;
+       }
+
+       return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+                                          const struct mipi_dsi_host_ops *funcs,
+                                          enum port port)
+{
+       struct intel_dsi_host *host;
+       struct mipi_dsi_device *device;
+
+       host = kzalloc(sizeof(*host), GFP_KERNEL);
+       if (!host)
+               return NULL;
+
+       host->base.ops = funcs;
+       host->intel_dsi = intel_dsi;
+       host->port = port;
+
+       /*
+        * We should call mipi_dsi_host_register(&host->base) here, but we don't
+        * have a host->dev, and we don't have OF stuff either. So just use the
+        * dsi framework as a library and hope for the best. Create the dsi
+        * devices by ourselves here too. Need to be careful though, because we
+        * don't initialize any of the driver model devices here.
+        */
+       device = kzalloc(sizeof(*device), GFP_KERNEL);
+       if (!device) {
+               kfree(host);
+               return NULL;
+       }
+
+       device->host = &host->base;
+       host->device = device;
+
+       return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       enum drm_panel_orientation orientation;
+
+       orientation = dev_priv->vbt.dsi.orientation;
+       if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+               return orientation;
+
+       orientation = dev_priv->vbt.orientation;
+       if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+               return orientation;
+
+       return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
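As a quick sanity check on intel_dsi_bitrate() above, with assumed panel parameters: a 1080p60 pixel clock of 148500 kHz, RGB888 (24 bpp) and 4 lanes give 148500 * 24 / 4 = 891000 kbps, i.e. 891 Mbps per lane.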
index ad7c1cb329836510d7263988a258a8be7f6e9623..ee93137f4433d3c3b7a482d7ea428f6a92fb602d 100644 (file)
@@ -81,14 +81,21 @@ struct intel_dsi {
        u16 dcs_backlight_ports;
        u16 dcs_cabc_ports;
 
+       /* RGB or BGR */
+       bool bgr_enabled;
+
        u8 pixel_overlap;
        u32 port_bits;
        u32 bw_timer;
        u32 dphy_reg;
+
+       /* data lanes dphy timing */
+       u32 dphy_data_lane_reg;
        u32 video_frmt_cfg_bits;
        u16 lp_byte_clk;
 
        /* timeouts in byte clocks */
+       u16 hs_tx_timeout;
        u16 lp_rx_timeout;
        u16 turn_arnd_val;
        u16 rst_timer_val;
@@ -129,9 +136,31 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
        return container_of(encoder, struct intel_dsi, base.base);
 }
 
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+       return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+       return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
 /* vlv_dsi.c */
 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+                                         struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+                                          const struct mipi_dsi_host_ops *funcs,
+                                          enum port port);
 
 /* vlv_dsi_pll.c */
 int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +187,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
                                 enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
 
 #endif /* _INTEL_DSI_H */
index ac83d6b89ae0c36c236ffc5bd155d86f045f7526..a72de81f4832381e21900f72fdbbc3e116c75bae 100644 (file)
@@ -111,6 +111,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
 static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
                                       const u8 *data)
 {
+       struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
        struct mipi_dsi_device *dsi_device;
        u8 type, flags, seq_port;
        u16 len;
@@ -181,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
                break;
        }
 
-       vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+       if (!IS_ICELAKE(dev_priv))
+               vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 out:
        data += len;
@@ -481,6 +483,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
        }
 }
 
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+       /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+       if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+               return;
+
+       msleep(msec);
+}
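+
+/*
+ * Usage sketch (the delay field name is an assumption): panel power
+ * sequencing can replace a raw delay such as
+ *
+ *     msleep(intel_dsi->panel_on_delay);
+ *
+ * with
+ *
+ *     intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ *
+ * so that v3 video-mode VBTs, whose sequences already embed the delays,
+ * don't end up waiting twice.
+ */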
+
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 {
        struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +512,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
        return 1;
 }
 
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX    0x7
+#define ICL_CLK_ZERO_CNT_MAX   0xf
+#define ICL_TRAIL_CNT_MAX      0x7
+#define ICL_TCLK_PRE_CNT_MAX   0x3
+#define ICL_TCLK_POST_CNT_MAX  0x7
+#define ICL_HS_ZERO_CNT_MAX    0xf
+#define ICL_EXIT_ZERO_CNT_MAX  0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
-       struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
-       struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
-       u32 bpp;
-       u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
-       u32 ui_num, ui_den;
+       u32 tlpx_ns;
        u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
        u32 ths_prepare_ns, tclk_trail_ns;
-       u32 tclk_prepare_clkzero, ths_prepare_hszero;
-       u32 lp_to_hs_switch, hs_to_lp_switch;
-       u32 pclk, computed_ddr;
-       u32 mul;
-       u16 burst_mode_ratio;
-       enum port port;
+       u32 hs_zero_cnt;
+       u32 tclk_pre_cnt, tclk_post_cnt;
 
-       DRM_DEBUG_KMS("\n");
+       tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
-       intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
-       intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
-       intel_dsi->lane_count = mipi_config->lane_cnt + 1;
-       intel_dsi->pixel_format =
-                       pixel_format_from_register_bits(
-                               mipi_config->videomode_color_format << 7);
-       bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
-
-       intel_dsi->dual_link = mipi_config->dual_link;
-       intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-       intel_dsi->operation_mode = mipi_config->is_cmd_mode;
-       intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
-       intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
-       intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
-       intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
-       intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
-       intel_dsi->init_count = mipi_config->master_init_timer;
-       intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
-       intel_dsi->video_frmt_cfg_bits =
-               mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
-
-       pclk = mode->clock;
+       tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+       ths_prepare_ns = max(mipi_config->ths_prepare,
+                            mipi_config->tclk_prepare);
 
-       /* In dual link mode each port needs half of pixel clock */
-       if (intel_dsi->dual_link) {
-               pclk = pclk / 2;
+       /*
+        * prepare cnt in escape clocks
+        * This field holds a fixed-point value in 1.2 format: the most
+        * significant bit is the integer part and the least significant
+        * 2 bits are fraction bits, so the field can represent a range
+        * of 0.25 to 1.75.
+        */
+       prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+       if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+               DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+               prepare_cnt = ICL_PREPARE_CNT_MAX;
+       }
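+       /*
+        * Worked example with assumed values: tlpx_ns = 50 and
+        * ths_prepare_ns = 60 give DIV_ROUND_UP(60 * 4, 50) = 5,
+        * i.e. 0b101 = 1.25 in the 1.2 fixed-point format above.
+        */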
 
-               /* we can enable pixel_overlap if needed by panel. In this
-                * case we need to increase the pixelclock for extra pixels
-                */
-               if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
-                       pclk += DIV_ROUND_UP(mode->vtotal *
-                                               intel_dsi->pixel_overlap *
-                                               60, 1000);
-               }
+       /* clk zero count in escape clocks */
+       clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+                                   ths_prepare_ns, tlpx_ns);
+       if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+               DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+               clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
        }
 
-       /* Burst Mode Ratio
-        * Target ddr frequency from VBT / non burst ddr freq
-        * multiply by 100 to preserve remainder
-        */
-       if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
-               if (mipi_config->target_burst_mode_freq) {
-                       computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
+       /* trail cnt in escape clocks */
+       trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+       if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+               DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+               trail_cnt = ICL_TRAIL_CNT_MAX;
+       }
 
-                       if (mipi_config->target_burst_mode_freq <
-                                                               computed_ddr) {
-                               DRM_ERROR("Burst mode freq is less than computed\n");
-                               return false;
-                       }
+       /* tclk pre count in escape clocks */
+       tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+       if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+               DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+               tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+       }
 
-                       burst_mode_ratio = DIV_ROUND_UP(
-                               mipi_config->target_burst_mode_freq * 100,
-                               computed_ddr);
+       /* tclk post count in escape clocks */
+       tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+       if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+               DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+               tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+       }
 
-                       pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
-               } else {
-                       DRM_ERROR("Burst mode target is not set\n");
-                       return false;
-               }
-       } else
-               burst_mode_ratio = 100;
+       /* hs zero cnt in escape clocks */
+       hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+                                  ths_prepare_ns, tlpx_ns);
+       if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+               DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+               hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+       }
 
-       intel_dsi->burst_mode_ratio = burst_mode_ratio;
-       intel_dsi->pclk = pclk;
+       /* hs exit zero cnt in escape clocks */
+       exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+       if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+               DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+               exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+       }
 
-       bitrate = (pclk * bpp) / intel_dsi->lane_count;
+       /* clock lane dphy timings */
+       intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+                              CLK_PREPARE(prepare_cnt) |
+                              CLK_ZERO_OVERRIDE |
+                              CLK_ZERO(clk_zero_cnt) |
+                              CLK_PRE_OVERRIDE |
+                              CLK_PRE(tclk_pre_cnt) |
+                              CLK_POST_OVERRIDE |
+                              CLK_POST(tclk_post_cnt) |
+                              CLK_TRAIL_OVERRIDE |
+                              CLK_TRAIL(trail_cnt));
+
+       /* data lanes dphy timings */
+       intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+                                        HS_PREPARE(prepare_cnt) |
+                                        HS_ZERO_OVERRIDE |
+                                        HS_ZERO(hs_zero_cnt) |
+                                        HS_TRAIL_OVERRIDE |
+                                        HS_TRAIL(trail_cnt) |
+                                        HS_EXIT_OVERRIDE |
+                                        HS_EXIT(exit_zero_cnt));
+}
 
-       switch (intel_dsi->escape_clk_div) {
-       case 0:
-               tlpx_ns = 50;
-               break;
-       case 1:
-               tlpx_ns = 100;
-               break;
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+       struct drm_device *dev = intel_dsi->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+       u32 tlpx_ns, extra_byte_count, tlpx_ui;
+       u32 ui_num, ui_den;
+       u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+       u32 ths_prepare_ns, tclk_trail_ns;
+       u32 tclk_prepare_clkzero, ths_prepare_hszero;
+       u32 lp_to_hs_switch, hs_to_lp_switch;
+       u32 mul;
 
-       case 2:
-               tlpx_ns = 200;
-               break;
-       default:
-               tlpx_ns = 50;
-               break;
-       }
+       tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
        switch (intel_dsi->lane_count) {
        case 1:
@@ -620,7 +648,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
        /* in Kbps */
        ui_num = NS_KHZ_RATIO;
-       ui_den = bitrate;
+       ui_den = intel_dsi_bitrate(intel_dsi);
 
        tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
        ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +774,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
                DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
                        8);
        intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+       struct drm_device *dev = intel_dsi->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+       struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+       struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+       u16 burst_mode_ratio;
+       enum port port;
+
+       DRM_DEBUG_KMS("\n");
+
+       intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+       intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+       intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+       intel_dsi->pixel_format =
+                       pixel_format_from_register_bits(
+                               mipi_config->videomode_color_format << 7);
+
+       intel_dsi->dual_link = mipi_config->dual_link;
+       intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+       intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+       intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+       intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+       intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+       intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+       intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+       intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+       intel_dsi->init_count = mipi_config->master_init_timer;
+       intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+       intel_dsi->video_frmt_cfg_bits =
+               mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+       intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+       /* Starting point, adjusted depending on dual link and burst mode */
+       intel_dsi->pclk = mode->clock;
+
+       /* In dual link mode each port needs half of pixel clock */
+       if (intel_dsi->dual_link) {
+               intel_dsi->pclk /= 2;
+
+               /* We can enable pixel_overlap if needed by the panel. In that
+                * case the pixel clock must be increased for the extra pixels.
+                */
+               if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+                       intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+               }
+       }
+
+       /* Burst Mode Ratio:
+        * target ddr frequency from VBT / non-burst ddr freq,
+        * multiplied by 100 to preserve the remainder
+        */
+       if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+               if (mipi_config->target_burst_mode_freq) {
+                       u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+                       if (mipi_config->target_burst_mode_freq < bitrate) {
+                               DRM_ERROR("Burst mode freq is less than computed\n");
+                               return false;
+                       }
+
+                       burst_mode_ratio = DIV_ROUND_UP(
+                               mipi_config->target_burst_mode_freq * 100,
+                               bitrate);
+
+                       intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+               } else {
+                       DRM_ERROR("Burst mode target is not set\n");
+                       return false;
+               }
+       } else
+               burst_mode_ratio = 100;
+
+       intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+       if (IS_ICELAKE(dev_priv))
+               icl_dphy_param_init(intel_dsi);
+       else
+               vlv_dphy_param_init(intel_dsi);
 
        DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
        DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
index 4e142ff49708537b33b113f34248b0c213f1ab5e..0042a7f69387780f6f1d5c105ca9cae43d41713d 100644 (file)
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        return true;
 }
 
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
-       drm_connector_cleanup(connector);
-       intel_panel_fini(&to_intel_connector(connector)->panel);
-       kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
        .detect = intel_dvo_detect,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_dvo_destroy,
+       .destroy = intel_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
index 217ed3ee1cab4e808f1dd8f4e8b60dca36ea65da..759c0fd58f8cde2704034a88a516b5d9bc1f06ef 100644 (file)
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
        BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
        BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
 
-       if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+       if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
                return -EINVAL;
 
-       if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+       if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
                return -EINVAL;
 
-       if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+       if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
                return -EINVAL;
 
        GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 
        WARN_ON(ring_mask == 0);
        WARN_ON(ring_mask &
-               GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+               GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
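+       /*
+        * BITS_PER_TYPE(t) expands to sizeof(t) * BITS_PER_BYTE, so this
+        * builds the same mask as before without hand-rolled arithmetic.
+        */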
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
 
        for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
                if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
                err = -EINVAL;
                err_id = id;
 
-               if (GEM_WARN_ON(!init))
+               if (GEM_DEBUG_WARN_ON(!init))
                        goto cleanup;
 
                err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
        struct intel_engine_execlists * const execlists = &engine->execlists;
 
        execlists->port_mask = 1;
-       BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+       GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
        execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
        i915_timeline_init(engine->i915, &engine->timeline, engine->name);
-       lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
+       i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
 
        intel_engine_init_execlist(engine);
        intel_engine_init_hangcheck(engine);
@@ -809,7 +812,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
        u32 slice = fls(sseu->slice_mask);
        u32 subslice = fls(sseu->subslice_mask[slice]);
 
-       if (INTEL_GEN(dev_priv) == 10)
+       if (IS_GEN10(dev_priv))
                mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
                                  GEN8_MCR_SUBSLICE(subslice);
        else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1537,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        count = 0;
        drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-               struct i915_priolist *p =
-                       rb_entry(rb, typeof(*p), node);
+               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+               int i;
 
-               list_for_each_entry(rq, &p->requests, sched.link) {
+               priolist_for_each_request(rq, p, i) {
                        if (count++ < MAX_REQUESTS_TO_SHOW - 1)
                                print_request(m, rq, "\t\tQ ");
                        else
@@ -1559,8 +1562,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
-               drm_printf(m, "\t%s [%d] waiting for %x\n",
-                          w->tsk->comm, w->tsk->pid, w->seqno);
+               drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
+                          w->tsk->comm, w->tsk->pid,
+                          task_state_to_char(w->tsk),
+                          w->seqno);
        }
        spin_unlock(&b->rb_lock);
        local_irq_restore(flags);
index 74d425c700ef092e9012042605d142282f0685c7..14cbaf4a0e9391233b12b474cd8bb1506a7e7e61 100644 (file)
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
        int lines;
 
        intel_fbc_get_plane_source_size(cache, NULL, &lines);
-       if (INTEL_GEN(dev_priv) == 7)
+       if (IS_GEN7(dev_priv))
                lines = min(lines, 2048);
        else if (INTEL_GEN(dev_priv) >= 8)
                lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->plane.adjusted_y = plane_state->color_plane[0].y;
        cache->plane.y = plane_state->base.src.y1 >> 16;
 
+       cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
        if (!cache->plane.visible)
                return;
 
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
+       if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+           cache->fb.format->has_alpha) {
+               fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+               return false;
+       }
+
        /* WaFbcExceedCdClockThreshold:hsw,bdw */
        if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
            cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
index f99332972b7ab5f0e947af3d8f7fb4eb6294f26b..2480c7d6edee468f15fa9af2794cf2f8c72404fc 100644 (file)
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                 * pipe.  Note we need to use the selected fb's pitch and bpp
                 * rather than the current pipe's, since they differ.
                 */
-               cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+               cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
                cur_size = cur_size * fb->base.format->cpp[0];
                if (fb->base.pitches[0] < cur_size) {
                        DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                        break;
                }
 
-               cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+               cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
                cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
                cur_size *= fb->base.pitches[0];
                DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
                              pipe_name(intel_crtc->pipe),
-                             intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
-                             intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+                             crtc->state->adjusted_mode.crtc_hdisplay,
+                             crtc->state->adjusted_mode.crtc_vdisplay,
                              fb->base.format->cpp[0] * 8,
                              cur_size);
 
index 230aea69385d4fb5e682d7991e5bc252a7012b6d..8660af3fd75566468651d03dd1eecca4429abbe1 100644 (file)
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
        unsigned int i;
 
        guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
-       guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+       guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+       BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
 
        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
        return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+/*
+ * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
+ * then return, so waiting on the H2G is not enough to guarantee GuC is done.
+ * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
+ * scratch register 14, so we can poll on that. Note that GuC does not ensure
+ * that the value in the register is different from
+ * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
+ * take care of that ourselves as well.
+ */
+static int guc_sleep_state_action(struct intel_guc *guc,
+                                 const u32 *action, u32 len)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       int ret;
+       u32 status;
+
+       I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+
+       ret = intel_guc_send(guc, action, len);
+       if (ret)
+               return ret;
+
+       ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+                                       INTEL_GUC_SLEEP_STATE_INVALID_MASK,
+                                       0, 0, 10, &status);
+       if (ret)
+               return ret;
+
+       if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
+               DRM_ERROR("GuC failed to change sleep state. "
+                         "action=0x%x, err=%u\n",
+                         action[0], status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /**
  * intel_guc_suspend() - notify GuC entering suspend state
  * @guc:       the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };
 
-       return intel_guc_send(guc, data, ARRAY_SIZE(data));
+       return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
 }
 
 /**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };
 
-       return intel_guc_send(guc, data, ARRAY_SIZE(data));
+       return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
 }
 
 /**
index ad42faf48c46a3f3773ce6eae1c9702b8379c041..0f1c4f9ebfd886581ac11beefb477ee047e0ab55 100644 (file)
@@ -95,6 +95,11 @@ struct intel_guc {
        void (*notify)(struct intel_guc *guc);
 };
 
+static inline bool intel_guc_is_alive(struct intel_guc *guc)
+{
+       return intel_uc_fw_is_loaded(&guc->fw);
+}
+
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
index a9e6fcce467c62a7e9b0e3035a181bbe46cd007f..a67144ee5ceb6c93a30f8a48f9d5314166e7cb94 100644 (file)
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
                guc_fw->major_ver_wanted = KBL_FW_MAJOR;
                guc_fw->minor_ver_wanted = KBL_FW_MINOR;
        } else {
-               DRM_WARN("%s: No firmware known for this platform!\n",
+               dev_info(dev_priv->drm.dev,
+                        "%s: No firmware known for this platform!\n",
                         intel_uc_fw_type_repr(guc_fw->type));
        }
 }
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
 }
 
 /* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct intel_uc_fw *guc_fw = &guc->fw;
-       struct sg_table *sg = vma->pages;
        u32 rsa[UOS_RSA_SCRATCH_COUNT];
        int i;
 
-       if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
-                              guc_fw->rsa_offset) != sizeof(rsa))
-               return -EINVAL;
+       sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+                          rsa, sizeof(rsa), guc->fw.rsa_offset);
 
        for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
                I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
-       return 0;
 }
 
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct intel_uc_fw *guc_fw = &guc->fw;
-       unsigned long offset;
-       u32 status;
-       int ret;
-
-       /*
-        * The header plus uCode will be copied to WOPCM via DMA, excluding any
-        * other components
-        */
-       I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
-       /* Set the source address for the new blob */
-       offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
-       I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
-       I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
-       /*
-        * Set the DMA destination. Current uCode expects the code to be
-        * loaded at 8k; locations below this are used for the stack.
-        */
-       I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
-       I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
-       /* Finally start the DMA */
-       I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
-       /* Wait for DMA to finish */
-       ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
-                                          2, 100, &status);
-       DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
-       return ret;
+       /* Did we complete the xfer? */
+       *status = I915_READ(DMA_CTRL);
+       return !(*status & START_DMA);
 }
 
 /*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
         * NB: Docs recommend not using the interrupt for completion.
         * Measurements indicate this should take no more than 20ms, so a
         * timeout here indicates that the GuC has failed and is unusable.
-        * (Higher levels of the driver will attempt to fall back to
-        * execlist mode if this happens.)
+        * (Higher levels of the driver may decide to reset the GuC and
+        * attempt the ucode load again if this happens.)
         */
        ret = wait_for(guc_ready(guc, &status), 100);
        DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,9 +189,51 @@ static int guc_wait_ucode(struct intel_guc *guc)
                ret = -ENOEXEC;
        }
 
+       if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+               DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+                         status);
+               ret = -ENXIO;
+       }
+
        return ret;
 }
 
+/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct intel_uc_fw *guc_fw = &guc->fw;
+       unsigned long offset;
+
+       /*
+        * The header plus uCode will be copied to WOPCM via DMA, excluding any
+        * other components
+        */
+       I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+       /* Set the source address for the new blob */
+       offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+       I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+       I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+       /*
+        * Set the DMA destination. Current uCode expects the code to be
+        * loaded at 8k; locations below this are used for the stack.
+        */
+       I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+       I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+       /* Finally start the DMA */
+       I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+       return guc_wait_ucode(guc);
+}
+
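
Aside: the address programming above splits the GGTT source offset across two registers, and DMA_ADDR_0_HIGH only carries 16 significant bits, hence the mask. A tiny standalone sketch of the split (offset value hypothetical):

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
        uint64_t offset = 0x0000123456789000ull;      /* hypothetical GGTT offset + header_offset */

        uint32_t lo = lower_32_bits(offset);          /* -> DMA_ADDR_0_LOW */
        uint32_t hi = upper_32_bits(offset) & 0xFFFF; /* -> DMA_ADDR_0_HIGH, 16 bits only */

        printf("low=%#010x high=%#06x\n", (unsigned)lo, (unsigned)hi);
        return 0;
}
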
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
         * by the DMA engine in one operation, whereas the RSA signature is
         * loaded via MMIO.
         */
-       ret = guc_xfer_rsa(guc, vma);
-       if (ret)
-               DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+       guc_xfer_rsa(guc, vma);
 
        ret = guc_xfer_ucode(guc, vma);
-       if (ret)
-               DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
-       ret = guc_wait_ucode(guc);
-       if (ret)
-               DRM_ERROR("GuC firmware xfer error %d\n", ret);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
index 8382d591c7842bb3f292fb3e62558c57b3b6a135..b2f5148f4f173a2f63097997886499e6b7160920 100644 (file)
 #define GUC_VIDEO_ENGINE2              4
 #define GUC_MAX_ENGINES_NUM            (GUC_VIDEO_ENGINE2 + 1)
 
+#define GUC_DOORBELL_INVALID           256
+
+#define GUC_DB_SIZE                    (PAGE_SIZE)
+#define GUC_WQ_SIZE                    (PAGE_SIZE * 2)
+
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE               1
 #define WQ_STATUS_SUSPENDED            2
@@ -59,9 +64,6 @@
 #define WQ_RING_TAIL_MAX               0x7FF   /* 2^11 QWords */
 #define WQ_RING_TAIL_MASK              (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
 
-#define GUC_DOORBELL_ENABLED           1
-#define GUC_DOORBELL_DISABLED          0
-
 #define GUC_STAGE_DESC_ATTR_ACTIVE     BIT(0)
 #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
 #define GUC_STAGE_DESC_ATTR_KERNEL     BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
        u32 header_info;
 } __packed;
 
-struct guc_doorbell_info {
-       u32 db_status;
-       u32 cookie;
-       u32 reserved[14];
-} __packed;
-
-union guc_doorbell_qw {
-       struct {
-               u32 db_status;
-               u32 cookie;
-       };
-       u64 value_qw;
-} __packed;
-
-#define GUC_NUM_DOORBELLS      256
-#define GUC_DOORBELL_INVALID   (GUC_NUM_DOORBELLS)
-
-#define GUC_DB_SIZE                    (PAGE_SIZE)
-#define GUC_WQ_SIZE                    (PAGE_SIZE * 2)
-
 /* Work item for submitting workloads into work queue of GuC. */
 struct guc_wq_item {
        u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
  * registers, where first register holds data treated as message header,
  * and other registers are used to hold message payload.
  *
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
  *
  *      +-----------+---------+---------+---------+
  *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
  *   field.
  */
 
+#define GUC_MAX_MMIO_MSG_LEN           8
+
 #define INTEL_GUC_MSG_TYPE_SHIFT       28
 #define INTEL_GUC_MSG_TYPE_MASK                (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
 #define INTEL_GUC_MSG_DATA_SHIFT       16
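
To illustrate the message layout described above, a toy example assembling MMIO[0] from the TYPE/DATA fields (field values hypothetical; shift/mask values copied from this header):

#include <stdint.h>
#include <stdio.h>

#define MSG_TYPE_SHIFT 28
#define MSG_TYPE_MASK  (0xFu << MSG_TYPE_SHIFT)
#define MSG_DATA_SHIFT 16

int main(void)
{
        uint32_t msg[8];                   /* GUC_MAX_MMIO_MSG_LEN registers back one message */
        uint32_t type = 0x0, data = 0x123; /* hypothetical field values */

        msg[0] = ((type << MSG_TYPE_SHIFT) & MSG_TYPE_MASK) |
                 (data << MSG_DATA_SHIFT); /* header goes in MMIO[0] */
        msg[1] = 0x12345678;               /* payload occupies MMIO[1..n] */

        printf("header=%#010x payload0=%#010x\n", (unsigned)msg[0], (unsigned)msg[1]);
        return 0;
}
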
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
        INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
 };
 
+enum intel_guc_sleep_state_status {
+       INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
+       INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
+       INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
 #define GUC_LOG_CONTROL_LOGGING_ENABLED        (1 << 0)
 #define GUC_LOG_CONTROL_VERBOSITY_SHIFT        4
 #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
index d86084742a4a0e32e4ce982c954bc440cb26059e..57e7ad522c2fed3d0166b0e7fb6a79e73de572b4 100644 (file)
 #define GUC_SEND_INTERRUPT             _MMIO(0xc4c8)
 #define   GUC_SEND_TRIGGER               (1<<0)
 
+#define GUC_NUM_DOORBELLS              256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+       u32 db_status;
+#define GUC_DOORBELL_DISABLED          0
+#define GUC_DOORBELL_ENABLED           1
+
+       u32 cookie;
+       u32 reserved[14];
+} __packed;
+
 #define GEN8_DRBREGL(x)                        _MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID                 (1<<0)
 #define GEN8_DRBREGU(x)                        _MMIO(0x1000 + (x) * 8 + 4)
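
The struct above is sized to exactly one 64-byte cacheline, which is what the hardware monitors. A standalone sketch of the lifecycle the submission code drives (enable, ring by bumping the cookie, disable); the layout is copied from above, the semantics paraphrased from the driver:

#include <stdint.h>
#include <stdio.h>

struct guc_doorbell_info {          /* one HW-monitored cacheline */
        uint32_t db_status;         /* GUC_DOORBELL_DISABLED (0) / GUC_DOORBELL_ENABLED (1) */
        uint32_t cookie;            /* bumped on every ring */
        uint32_t reserved[14];
} __attribute__((packed));

_Static_assert(sizeof(struct guc_doorbell_info) == 64,
               "doorbell info must span exactly one cacheline");

int main(void)
{
        struct guc_doorbell_info db = { 0 };

        db.db_status = 1;  /* enable: firmware starts watching the line */
        db.cookie++;       /* ring: a write to the monitored line kicks the firmware */
        db.db_status = 0;  /* disable: then wait for GEN8_DRB_VALID to clear */

        printf("cookie=%u status=%u\n", (unsigned)db.cookie, (unsigned)db.db_status);
        return 0;
}
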
index a81f04d46e87650b7185a6508afe9d4f72567ddd..1570dcbe249c0c8c6b9c8755bf0a89e2a8b2368b 100644 (file)
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
        return client->vaddr + client->doorbell_offset;
 }
 
-static void __create_doorbell(struct intel_guc_client *client)
+static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+       GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
+       return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+}
+
+static void __init_doorbell(struct intel_guc_client *client)
 {
        struct guc_doorbell_info *doorbell;
 
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
        doorbell->cookie = 0;
 }
 
-static void __destroy_doorbell(struct intel_guc_client *client)
+static void __fini_doorbell(struct intel_guc_client *client)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
        struct guc_doorbell_info *doorbell;
        u16 db_id = client->doorbell_id;
 
        doorbell = __get_doorbell(client);
        doorbell->db_status = GUC_DOORBELL_DISABLED;
-       doorbell->cookie = 0;
 
        /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
         * to go to zero after updating db_status before we call the GuC to
         * release the doorbell
         */
-       if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+       if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
                WARN_ONCE(true, "Doorbell never became invalid after disable\n");
 }
 
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
                return -ENODEV; /* internal setup error, should never happen */
 
        __update_doorbell_desc(client, client->doorbell_id);
-       __create_doorbell(client);
+       __init_doorbell(client);
 
        ret = __guc_allocate_doorbell(client->guc, client->stage_id);
        if (ret) {
-               __destroy_doorbell(client);
+               __fini_doorbell(client);
                __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
                DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
                                 client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
 
        GEM_BUG_ON(!has_doorbell(client));
 
-       __destroy_doorbell(client);
+       __fini_doorbell(client);
        ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
        if (ret)
                DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_proc_desc_init(struct intel_guc *guc,
-                              struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc_client *client)
 {
        struct guc_process_desc *desc;
 
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
        desc->priority = client->priority;
 }
 
+static void guc_proc_desc_fini(struct intel_guc_client *client)
+{
+       struct guc_process_desc *desc;
+
+       desc = __get_process_desc(client);
+       memset(desc, 0, sizeof(*desc));
+}
+
 static int guc_stage_desc_pool_create(struct intel_guc *guc)
 {
        struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
  * data structures relating to this client (doorbell, process descriptor,
  * write queue, etc).
  */
-static void guc_stage_desc_init(struct intel_guc *guc,
-                               struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc_client *client)
 {
+       struct intel_guc *guc = client->guc;
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
        desc->desc_private = ptr_to_u64(client);
 }
 
-static void guc_stage_desc_fini(struct intel_guc *guc,
-                               struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc_client *client)
 {
        struct guc_stage_desc *desc;
 
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
        WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
 }
 
-static void guc_reset_wq(struct intel_guc_client *client)
-{
-       struct guc_process_desc *desc = __get_process_desc(client);
-
-       desc->head = 0;
-       desc->tail = 0;
-}
-
 static void guc_ring_doorbell(struct intel_guc_client *client)
 {
        struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
+               int i;
 
-               list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+               priolist_for_each_request_consume(rq, rn, p, i) {
                        if (last && rq->hw_context != last->hw_context) {
-                               if (port == last_port) {
-                                       __list_del_many(&p->requests,
-                                                       &rq->sched.link);
+                               if (port == last_port)
                                        goto done;
-                               }
 
                                if (submit)
                                        port_assign(port, last);
                                port++;
                        }
 
-                       INIT_LIST_HEAD(&rq->sched.link);
+                       list_del_init(&rq->sched.link);
 
                        __i915_request_submit(rq);
                        trace_i915_request_in(rq, port_index(port, execlists));
+
                        last = rq;
                        submit = true;
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
@@ -791,19 +793,8 @@ done:
 
 static void guc_dequeue(struct intel_engine_cs *engine)
 {
-       unsigned long flags;
-       bool submit;
-
-       local_irq_save(flags);
-
-       spin_lock(&engine->timeline.lock);
-       submit = __guc_dequeue(engine);
-       spin_unlock(&engine->timeline.lock);
-
-       if (submit)
+       if (__guc_dequeue(engine))
                guc_submit(engine);
-
-       local_irq_restore(flags);
 }
 
 static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
        struct i915_request *rq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&engine->timeline.lock, flags);
 
        rq = port_request(port);
        while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
 
        if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
                guc_dequeue(engine);
+
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
 /* Check that a doorbell register is in the expected state */
 static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       u32 drbregl;
        bool valid;
 
-       GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+       GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
 
-       drbregl = I915_READ(GEN8_DRBREGL(db_id));
-       valid = drbregl & GEN8_DRB_VALID;
+       valid = __doorbell_valid(guc, db_id);
 
        if (test_bit(db_id, guc->doorbell_bitmap) == valid)
                return true;
 
-       DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
-                        db_id, drbregl, yesno(valid));
+       DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
+                        db_id, yesno(valid));
 
        return false;
 }
 
 static bool guc_verify_doorbells(struct intel_guc *guc)
 {
+       bool doorbells_ok = true;
        u16 db_id;
 
        for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
                if (!doorbell_ok(guc, db_id))
-                       return false;
-
-       return true;
-}
-
-static int guc_clients_doorbell_init(struct intel_guc *guc)
-{
-       int ret;
-
-       ret = create_doorbell(guc->execbuf_client);
-       if (ret)
-               return ret;
-
-       if (guc->preempt_client) {
-               ret = create_doorbell(guc->preempt_client);
-               if (ret) {
-                       destroy_doorbell(guc->execbuf_client);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static void guc_clients_doorbell_fini(struct intel_guc *guc)
-{
-       /*
-        * By the time we're here, GuC has already been reset.
-        * Instead of trying (in vain) to communicate with it, let's just
-        * cleanup the doorbell HW and our internal state.
-        */
-       if (guc->preempt_client) {
-               __destroy_doorbell(guc->preempt_client);
-               __update_doorbell_desc(guc->preempt_client,
-                                      GUC_DOORBELL_INVALID);
-       }
+                       doorbells_ok = false;
 
-       if (guc->execbuf_client) {
-               __destroy_doorbell(guc->execbuf_client);
-               __update_doorbell_desc(guc->execbuf_client,
-                                      GUC_DOORBELL_INVALID);
-       }
+       return doorbells_ok;
 }
 
 /**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        }
        client->vaddr = vaddr;
 
+       ret = reserve_doorbell(client);
+       if (ret)
+               goto err_vaddr;
+
        client->doorbell_offset = __select_cacheline(guc);
 
        /*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        else
                client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-       guc_proc_desc_init(guc, client);
-       guc_stage_desc_init(guc, client);
-
-       ret = reserve_doorbell(client);
-       if (ret)
-               goto err_vaddr;
-
        DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
                         priority, client, client->engines, client->stage_id);
        DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
 static void guc_client_free(struct intel_guc_client *client)
 {
        unreserve_doorbell(client);
-       guc_stage_desc_fini(client->guc, client);
        i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
        ida_simple_remove(&client->guc->stage_ids, client->stage_id);
        kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
                guc_client_free(client);
 }
 
+static int __guc_client_enable(struct intel_guc_client *client)
+{
+       int ret;
+
+       guc_proc_desc_init(client);
+       guc_stage_desc_init(client);
+
+       ret = create_doorbell(client);
+       if (ret)
+               goto fail;
+
+       return 0;
+
+fail:
+       guc_stage_desc_fini(client);
+       guc_proc_desc_fini(client);
+       return ret;
+}
+
+static void __guc_client_disable(struct intel_guc_client *client)
+{
+       /*
+        * By the time we're here, GuC may have already been reset. If that is
+        * the case, instead of trying (in vain) to communicate with it, let's
+        * just clean up the doorbell HW and our internal state.
+        */
+       if (intel_guc_is_alive(client->guc))
+               destroy_doorbell(client);
+       else
+               __fini_doorbell(client);
+
+       guc_stage_desc_fini(client);
+       guc_proc_desc_fini(client);
+}
+
+static int guc_clients_enable(struct intel_guc *guc)
+{
+       int ret;
+
+       ret = __guc_client_enable(guc->execbuf_client);
+       if (ret)
+               return ret;
+
+       if (guc->preempt_client) {
+               ret = __guc_client_enable(guc->preempt_client);
+               if (ret) {
+                       __guc_client_disable(guc->execbuf_client);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void guc_clients_disable(struct intel_guc *guc)
+{
+       if (guc->preempt_client)
+               __guc_client_disable(guc->preempt_client);
+
+       if (guc->execbuf_client)
+               __guc_client_disable(guc->execbuf_client);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 
        GEM_BUG_ON(!guc->execbuf_client);
 
-       guc_reset_wq(guc->execbuf_client);
-       if (guc->preempt_client)
-               guc_reset_wq(guc->preempt_client);
-
        err = intel_guc_sample_forcewake(guc);
        if (err)
                return err;
 
-       err = guc_clients_doorbell_init(guc);
+       err = guc_clients_enable(guc);
        if (err)
                return err;
 
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
        GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
 
        guc_interrupts_release(dev_priv);
-       guc_clients_doorbell_fini(guc);
+       guc_clients_disable(guc);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index 26e48fc95543244bb163f5f0f7d5370bda11ed59..1bf487f9425404cb85b3b5f5ef3bb43be32a0405 100644 (file)
 
 #define KEY_LOAD_TRIES 5
 
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+       int i, ones = 0;
+       /* KSV has 20 1's and 20 0's */
+       for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+               ones += hweight8(ksv[i]);
+       if (ones != 20)
+               return false;
+
+       return true;
+}
+
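
For intuition: a well-formed KSV has exactly 20 of its 40 bits set, which is what the helper above enforces. A quick standalone demonstration (sample values hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRM_HDCP_KSV_LEN 5

static bool ksv_valid(const uint8_t *ksv)
{
        int i, ones = 0;

        for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
                ones += __builtin_popcount(ksv[i]); /* userspace stand-in for hweight8() */
        return ones == 20;
}

int main(void)
{
        uint8_t good[DRM_HDCP_KSV_LEN] = { 0xff, 0xff, 0x0f, 0x00, 0x00 }; /* 20 ones: valid */
        uint8_t bad[DRM_HDCP_KSV_LEN]  = { 0xff, 0x00, 0x00, 0x00, 0x00 }; /*  8 ones: invalid */

        printf("good=%d bad=%d\n", ksv_valid(good), ksv_valid(bad));
        return 0;
}
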
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+                              const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+       int ret, i, tries = 2;
+
+       /* HDCP spec states that we must retry the bksv if it is invalid */
+       for (i = 0; i < tries; i++) {
+               ret = shim->read_bksv(intel_dig_port, bksv);
+               if (ret)
+                       return ret;
+               if (intel_hdcp_is_ksv_valid(bksv))
+                       break;
+       }
+       if (i == tries) {
+               DRM_DEBUG_KMS("Bksv is invalid\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Is HDCP 1.4 supported on the platform and by the sink? */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+       bool capable = false;
+       u8 bksv[5];
+
+       if (!shim)
+               return capable;
+
+       if (shim->hdcp_capable) {
+               shim->hdcp_capable(intel_dig_port, &capable);
+       } else {
+               if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+                       capable = true;
+       }
+
+       return capable;
+}
+
 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                    const struct intel_hdcp_shim *shim)
 {
@@ -167,18 +223,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
        return -EINVAL;
 }
 
-static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
-       int i, ones = 0;
-       /* KSV has 20 1's and 20 0's */
-       for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
-               ones += hweight8(ksv[i]);
-       if (ones != 20)
-               return false;
-       return true;
-}
-
 static
 int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
                                const struct intel_hdcp_shim *shim,
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
        if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
                                    HDCP_SHA1_COMPLETE,
                                    HDCP_SHA1_COMPLETE, 1)) {
-               DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
+               DRM_ERROR("Timed out waiting for SHA1 complete\n");
                return -ETIMEDOUT;
        }
        if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
        ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
        if (ret) {
-               DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+               DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
                return ret;
        }
 
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
        if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
            DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
-               DRM_ERROR("Max Topology Limit Exceeded\n");
+               DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
                return -EPERM;
        }
 
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
        }
 
        if (i == tries) {
-               DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+               DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
                goto err;
        }
 
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
                if (ret)
                        return ret;
                if (!hdcp_capable) {
-                       DRM_ERROR("Panel is not HDCP capable\n");
+                       DRM_DEBUG_KMS("Panel is not HDCP capable\n");
                        return -EINVAL;
                }
        }
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 
        memset(&bksv, 0, sizeof(bksv));
 
-       /* HDCP spec states that we must retry the bksv if it is invalid */
-       for (i = 0; i < tries; i++) {
-               ret = shim->read_bksv(intel_dig_port, bksv.shim);
-               if (ret)
-                       return ret;
-               if (intel_hdcp_is_ksv_valid(bksv.shim))
-                       break;
-       }
-       if (i == tries) {
-               DRM_ERROR("HDCP failed, Bksv is invalid\n");
-               return -ENODEV;
-       }
+       ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+       if (ret < 0)
+               return ret;
 
        I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
        I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
        }
 
        if (i == tries) {
-               DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
-                         I915_READ(PORT_HDCP_STATUS(port)));
+               DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+                             I915_READ(PORT_HDCP_STATUS(port)));
                return -ETIMEDOUT;
        }
 
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
        return 0;
 }
 
-static
-struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
-{
-       return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
-}
-
 static int _intel_hdcp_disable(struct intel_connector *connector)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
                return -ETIMEDOUT;
        }
 
-       ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+       ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
        if (ret) {
                DRM_ERROR("Failed to disable HDCP signalling\n");
                return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 
 static int _intel_hdcp_enable(struct intel_connector *connector)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        int i, ret, tries = 3;
 
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 
        /* In case of authentication failures, the HDCP spec expects reauth. */
        for (i = 0; i < tries; i++) {
-               ret = intel_hdcp_auth(conn_to_dig_port(connector),
-                                     connector->hdcp_shim);
+               ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
                if (!ret)
                        return 0;
 
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
                _intel_hdcp_disable(connector);
        }
 
-       DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+       DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
        return ret;
 }
 
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+       return container_of(hdcp, struct intel_connector, hdcp);
+}
+
 static void intel_hdcp_check_work(struct work_struct *work)
 {
-       struct intel_connector *connector = container_of(to_delayed_work(work),
-                                                        struct intel_connector,
-                                                        hdcp_check_work);
+       struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+                                              struct intel_hdcp,
+                                              check_work);
+       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
        if (!intel_hdcp_check_link(connector))
-               schedule_delayed_work(&connector->hdcp_check_work,
+               schedule_delayed_work(&hdcp->check_work,
                                      DRM_HDCP_CHECK_PERIOD_MS);
 }
 
 static void intel_hdcp_prop_work(struct work_struct *work)
 {
-       struct intel_connector *connector = container_of(work,
-                                                        struct intel_connector,
-                                                        hdcp_prop_work);
+       struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+                                              prop_work);
+       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
        struct drm_device *dev = connector->base.dev;
        struct drm_connector_state *state;
 
        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       mutex_lock(&connector->hdcp_mutex);
+       mutex_lock(&hdcp->mutex);
 
        /*
         * This worker is only used to flip between ENABLED/DESIRED. Either of
-        * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+        * those to UNDESIRED is handled by the core. If value == UNDESIRED,
         * we're running just after hdcp has been disabled, so just exit
         */
-       if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                state = connector->base.state;
-               state->content_protection = connector->hdcp_value;
+               state->content_protection = hdcp->value;
        }
 
-       mutex_unlock(&connector->hdcp_mutex);
+       mutex_unlock(&hdcp->mutex);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 }
 
 int intel_hdcp_init(struct intel_connector *connector,
-                   const struct intel_hdcp_shim *hdcp_shim)
+                   const struct intel_hdcp_shim *shim)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        int ret;
 
        ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
        if (ret)
                return ret;
 
-       connector->hdcp_shim = hdcp_shim;
-       mutex_init(&connector->hdcp_mutex);
-       INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
-       INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+       hdcp->shim = shim;
+       mutex_init(&hdcp->mutex);
+       INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+       INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
        return 0;
 }
 
 int intel_hdcp_enable(struct intel_connector *connector)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        int ret;
 
-       if (!connector->hdcp_shim)
+       if (!hdcp->shim)
                return -ENOENT;
 
-       mutex_lock(&connector->hdcp_mutex);
+       mutex_lock(&hdcp->mutex);
 
        ret = _intel_hdcp_enable(connector);
        if (ret)
                goto out;
 
-       connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-       schedule_work(&connector->hdcp_prop_work);
-       schedule_delayed_work(&connector->hdcp_check_work,
+       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+       schedule_work(&hdcp->prop_work);
+       schedule_delayed_work(&hdcp->check_work,
                              DRM_HDCP_CHECK_PERIOD_MS);
 out:
-       mutex_unlock(&connector->hdcp_mutex);
+       mutex_unlock(&hdcp->mutex);
        return ret;
 }
 
 int intel_hdcp_disable(struct intel_connector *connector)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        int ret = 0;
 
-       if (!connector->hdcp_shim)
+       if (!hdcp->shim)
                return -ENOENT;
 
-       mutex_lock(&connector->hdcp_mutex);
+       mutex_lock(&hdcp->mutex);
 
-       if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
                ret = _intel_hdcp_disable(connector);
        }
 
-       mutex_unlock(&connector->hdcp_mutex);
-       cancel_delayed_work_sync(&connector->hdcp_check_work);
+       mutex_unlock(&hdcp->mutex);
+       cancel_delayed_work_sync(&hdcp->check_work);
        return ret;
 }
 
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 /* Implements Part 3 of the HDCP authorization procedure */
 int intel_hdcp_check_link(struct intel_connector *connector)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        enum port port = intel_dig_port->base.port;
        int ret = 0;
 
-       if (!connector->hdcp_shim)
+       if (!hdcp->shim)
                return -ENOENT;
 
-       mutex_lock(&connector->hdcp_mutex);
+       mutex_lock(&hdcp->mutex);
 
-       if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+       if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
                goto out;
 
        if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
                          connector->base.name, connector->base.base.id,
                          I915_READ(PORT_HDCP_STATUS(port)));
                ret = -ENXIO;
-               connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&connector->hdcp_prop_work);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
                goto out;
        }
 
-       if (connector->hdcp_shim->check_link(intel_dig_port)) {
-               if (connector->hdcp_value !=
-                   DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-                       connector->hdcp_value =
-                               DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&connector->hdcp_prop_work);
+       if (hdcp->shim->check_link(intel_dig_port)) {
+               if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+                       schedule_work(&hdcp->prop_work);
                }
                goto out;
        }
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
        ret = _intel_hdcp_disable(connector);
        if (ret) {
                DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
-               connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&connector->hdcp_prop_work);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
                goto out;
        }
 
        ret = _intel_hdcp_enable(connector);
        if (ret) {
-               DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
-               connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&connector->hdcp_prop_work);
+               DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
                goto out;
        }
 
 out:
-       mutex_unlock(&connector->hdcp_mutex);
+       mutex_unlock(&hdcp->mutex);
        return ret;
 }
index d7234e03fdb0cc385db1482993cddcdbc62126c2..e2c6a2b3e8f2591da7a2652c26955421f702e816 100644 (file)
@@ -148,14 +148,13 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
        }
 }
 
-static void g4x_write_infoframe(struct drm_encoder *encoder,
+static void g4x_write_infoframe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
        const u32 *data = frame;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
        int i;
 
@@ -186,31 +185,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(VIDEO_DIP_CTL);
 }
 
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
 
        if ((val & VIDEO_DIP_ENABLE) == 0)
                return false;
 
-       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
                return false;
 
        return val & (VIDEO_DIP_ENABLE_AVI |
                      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
-static void ibx_write_infoframe(struct drm_encoder *encoder,
+static void ibx_write_infoframe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
        const u32 *data = frame;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -243,11 +240,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
        i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
        u32 val = I915_READ(reg);
@@ -255,7 +251,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
        if ((val & VIDEO_DIP_ENABLE) == 0)
                return false;
 
-       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
                return false;
 
        return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +259,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
                      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void cpt_write_infoframe(struct drm_encoder *encoder,
+static void cpt_write_infoframe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
        const u32 *data = frame;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -306,10 +301,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
        u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
@@ -321,14 +316,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
                      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void vlv_write_infoframe(struct drm_encoder *encoder,
+static void vlv_write_infoframe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
        const u32 *data = frame;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -361,18 +355,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
        u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
        if ((val & VIDEO_DIP_ENABLE) == 0)
                return false;
 
-       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
                return false;
 
        return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,14 +373,13 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
                      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void hsw_write_infoframe(struct drm_encoder *encoder,
+static void hsw_write_infoframe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
        const u32 *data = frame;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
        int data_size = type == DP_SDP_VSC ?
@@ -415,10 +407,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(ctl_reg);
 }
 
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 
        return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +435,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
  * trick them by giving an offset into the buffer and moving back the header
  * bytes by one.
  */
-static void intel_write_infoframe(struct drm_encoder *encoder,
+static void intel_write_infoframe(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  union hdmi_infoframe *frame)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
        u8 buffer[VIDEO_DIP_DATA_SIZE];
        ssize_t len;
 
@@ -457,20 +449,20 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
                return;
 
        /* Insert the 'hole' (see big comment above) at position 3 */
-       buffer[0] = buffer[1];
-       buffer[1] = buffer[2];
-       buffer[2] = buffer[3];
+       memmove(&buffer[0], &buffer[1], 3);
        buffer[3] = 0;
        len++;
 
-       intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+       intel_dig_port->write_infoframe(encoder,
+                                       crtc_state,
+                                       frame->any.type, buffer, len);
 }
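
The "hole" insertion used by intel_write_infoframe() is easier to see in isolation: the frame is packed one byte into the buffer, the three header bytes are slid back by one, and byte 3 becomes the zero hole the DIP registers expect. A standalone sketch with toy header/payload bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t buffer[8] = { 0 };
        /* toy AVI-style bytes: HB0 HB1 HB2 PB0 */
        uint8_t packed[] = { 0x82, 0x02, 0x0d, 0x55 };
        int i;

        memcpy(&buffer[1], packed, sizeof(packed)); /* pack at offset 1, as the driver does */

        memmove(&buffer[0], &buffer[1], 3);         /* slide the header back by one... */
        buffer[3] = 0;                              /* ...leaving the zero hole at byte 3 */

        for (i = 0; i < 6; i++)
                printf("%02x ", buffer[i]);         /* prints: 82 02 0d 00 55 00 */
        printf("\n");
        return 0;
}
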
 
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state,
                                         const struct drm_connector_state *conn_state)
 {
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        struct drm_connector *connector = &intel_hdmi->attached_connector->base;
@@ -487,8 +479,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                return;
        }
 
-       if (crtc_state->ycbcr420)
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+       else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+               frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
        else
                frame.avi.colorspace = HDMI_COLORSPACE_RGB;
 
@@ -503,10 +497,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                            conn_state);
 
        /* TODO: handle pixel repetition for YCBCR420 outputs */
-       intel_write_infoframe(encoder, crtc_state, &frame);
+       intel_write_infoframe(encoder, crtc_state,
+                             &frame);
 }
 
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state)
 {
        union hdmi_infoframe frame;
@@ -520,11 +515,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
 
        frame.spd.sdi = HDMI_SPD_SDI_PC;
 
-       intel_write_infoframe(encoder, crtc_state, &frame);
+       intel_write_infoframe(encoder, crtc_state,
+                             &frame);
 }
 
 static void
-intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
                              const struct intel_crtc_state *crtc_state,
                              const struct drm_connector_state *conn_state)
 {
@@ -537,20 +533,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
        if (ret < 0)
                return;
 
-       intel_write_infoframe(encoder, crtc_state, &frame);
+       intel_write_infoframe(encoder, crtc_state,
+                             &frame);
 }
 
-static void g4x_set_infoframes(struct drm_encoder *encoder,
+static void g4x_set_infoframes(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        i915_reg_t reg = VIDEO_DIP_CTL;
        u32 val = I915_READ(reg);
-       u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+       u32 port = VIDEO_DIP_PORT(encoder->port);
 
        assert_hdmi_port_disabled(intel_hdmi);
 
@@ -658,11 +655,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
                 mode->crtc_htotal/2 % pixels_per_group == 0);
 }
 
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state,
                                         const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        i915_reg_t reg;
        u32 val = 0;
@@ -690,18 +687,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
        return val != 0;
 }
 
-static void ibx_set_infoframes(struct drm_encoder *encoder,
+static void ibx_set_infoframes(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
-       u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+       u32 port = VIDEO_DIP_PORT(encoder->port);
 
        assert_hdmi_port_disabled(intel_hdmi);
 
@@ -743,14 +740,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void cpt_set_infoframes(struct drm_encoder *encoder,
+static void cpt_set_infoframes(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
@@ -786,18 +783,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void vlv_set_infoframes(struct drm_encoder *encoder,
+static void vlv_set_infoframes(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
-       u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+       u32 port = VIDEO_DIP_PORT(encoder->port);
 
        assert_hdmi_port_disabled(intel_hdmi);
 
@@ -839,12 +835,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void hsw_set_infoframes(struct drm_encoder *encoder,
+static void hsw_set_infoframes(struct intel_encoder *encoder,
                               bool enable,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
        u32 val = I915_READ(reg);
 
@@ -966,13 +962,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
                                    DRM_HDCP_AN_LEN);
        if (ret) {
-               DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
                return ret;
        }
 
        ret = intel_gmbus_output_aksv(adapter);
        if (ret < 0) {
-               DRM_ERROR("Failed to output aksv (%d)\n", ret);
+               DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
                return ret;
        }
        return 0;
@@ -985,7 +981,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
                                   DRM_HDCP_KSV_LEN);
        if (ret)
-               DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
        return ret;
 }
 
@@ -997,7 +993,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
                                   bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret)
-               DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
        return ret;
 }
 
@@ -1010,7 +1006,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
-               DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
                return ret;
        }
        *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1025,7 +1021,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
                                   ri_prime, DRM_HDCP_RI_LEN);
        if (ret)
-               DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
        return ret;
 }
 
@@ -1038,7 +1034,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
-               DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
                return ret;
        }
        *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1053,7 +1049,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
                                   ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
        if (ret) {
-               DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+               DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
                return ret;
        }
        return 0;
@@ -1071,7 +1067,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
                                   part, DRM_HDCP_V_PRIME_PART_LEN);
        if (ret)
-               DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+               DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
        return ret;
 }
 
@@ -1218,7 +1214,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
 
-       if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+       if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
                pipe_config->has_infoframe = true;
 
        if (tmp & SDVO_AUDIO_ENABLE)
@@ -1439,7 +1435,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
 
-       intel_dig_port->set_infoframes(&encoder->base, false,
+       intel_dig_port->set_infoframes(encoder,
+                                      false,
                                       old_crtc_state, old_conn_state);
 
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1598,6 +1595,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
        struct drm_atomic_state *state = crtc_state->base.state;
        struct drm_connector_state *connector_state;
        struct drm_connector *connector;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
        int i;
 
        if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1625,7 +1624,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
                if (connector_state->crtc != crtc_state->base.crtc)
                        continue;
 
-               if (crtc_state->ycbcr420) {
+               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
                        const struct drm_hdmi_info *hdmi = &info->hdmi;
 
                        if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1646,7 +1645,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 
        /* Display WA #1139: glk */
        if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
-           crtc_state->base.adjusted_mode.htotal > 5460)
+           adjusted_mode->htotal > 5460)
+               return false;
+
+       /* Display Wa_1405510057:icl */
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+           bpc == 10 && IS_ICELAKE(dev_priv) &&
+           (adjusted_mode->crtc_hblank_end -
+            adjusted_mode->crtc_hblank_start) % 8 == 2)
                return false;
 
        return true;
@@ -1670,7 +1676,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
        *clock_12bpc /= 2;
        *clock_10bpc /= 2;
        *clock_8bpc /= 2;
-       config->ycbcr420 = true;
+       config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
 
        /* YCBCR 420 output conversion needs a scaler */
        if (skl_update_scaler_crtc(config)) {
@@ -1704,6 +1710,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
        if (pipe_config->has_hdmi_sink)
@@ -1974,7 +1981,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
 
        intel_hdmi_prepare(encoder, pipe_config);
 
-       intel_dig_port->set_infoframes(&encoder->base,
+       intel_dig_port->set_infoframes(encoder,
                                       pipe_config->has_infoframe,
                                       pipe_config, conn_state);
 }
@@ -1992,7 +1999,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
        vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
                                 0x2b247878);
 
-       dport->set_infoframes(&encoder->base,
+       dport->set_infoframes(encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
@@ -2063,7 +2070,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
        /* Use 800mV-0dB */
        chv_set_phy_signal_level(encoder, 128, 102, false);
 
-       dport->set_infoframes(&encoder->base,
+       dport->set_infoframes(encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
@@ -2075,13 +2082,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
        chv_phy_release_cl2_override(encoder);
 }
 
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+       int ret;
+
+       ret = intel_connector_register(connector);
+       if (ret)
+               return ret;
+
+       i915_debugfs_connector_add(connector);
+
+       return ret;
+}
+
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
        if (intel_attached_hdmi(connector)->cec_notifier)
                cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
-       kfree(to_intel_connector(connector)->detect_edid);
-       drm_connector_cleanup(connector);
-       kfree(connector);
+
+       intel_connector_destroy(connector);
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2090,7 +2110,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
-       .late_register = intel_connector_register,
+       .late_register = intel_hdmi_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_hdmi_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2110,11 +2130,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
        intel_attach_aspect_ratio_property(connector);
        drm_connector_attach_content_type_property(connector);
        connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+
+       if (!HAS_GMCH_DISPLAY(dev_priv))
+               drm_connector_attach_max_bpc_property(connector, 8, 12);
 }
 
 /*
@@ -2325,9 +2350,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
                intel_dig_port->set_infoframes = g4x_set_infoframes;
                intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
        } else if (HAS_DDI(dev_priv)) {
-               intel_dig_port->write_infoframe = hsw_write_infoframe;
-               intel_dig_port->set_infoframes = hsw_set_infoframes;
-               intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+               if (intel_dig_port->lspcon.active) {
+                       intel_dig_port->write_infoframe =
+                                       lspcon_write_infoframe;
+                       intel_dig_port->set_infoframes = lspcon_set_infoframes;
+                       intel_dig_port->infoframe_enabled =
+                                               lspcon_infoframe_enabled;
+               } else {
+                       intel_dig_port->write_infoframe = hsw_write_infoframe;
+                       intel_dig_port->set_infoframes = hsw_set_infoframes;
+                       intel_dig_port->infoframe_enabled =
+                                               hsw_infoframe_enabled;
+               }
        } else if (HAS_PCH_IBX(dev_priv)) {
                intel_dig_port->write_infoframe = ibx_write_infoframe;
                intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2486,5 +2520,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
 
        intel_infoframe_init(intel_dig_port);
 
+       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        intel_hdmi_init_connector(intel_dig_port, intel_connector);
 }
index 648a13c6043c0071ddd495424691d795b39b96a1..e24174d08fedb55ca4619e61cb96570deab6bc52 100644 (file)
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 #define HPD_STORM_REENABLE_DELAY       (2 * 60 * 1000)
 
 /**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
  * @dev_priv: private driver data pointer
  * @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
  *
- * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
  * storms. Only the pin specific stats and state are changed, the caller is
  * responsible for further action.
  *
- * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
  * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
- * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must track these as well. Because short
+ * IRQs occur naturally as part of sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
  *
  * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
  * and should only be adjusted for automated hotplug testing.
  *
- * Return true if an irq storm was detected on @pin.
+ * Return true if an IRQ storm was detected on @pin.
  */
 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
-                                      enum hpd_pin pin)
+                                      enum hpd_pin pin, bool long_hpd)
 {
-       unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+       struct i915_hotplug *hpd = &dev_priv->hotplug;
+       unsigned long start = hpd->stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
-       const int threshold = dev_priv->hotplug.hpd_storm_threshold;
+       const int increment = long_hpd ? 10 : 1;
+       const int threshold = hpd->hpd_storm_threshold;
        bool storm = false;
 
+       if (!threshold ||
+           (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+               return false;
+
        if (!time_in_range(jiffies, start, end)) {
-               dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
-               dev_priv->hotplug.stats[pin].count = 0;
-               DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
-       } else if (dev_priv->hotplug.stats[pin].count > threshold &&
-                  threshold) {
-               dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+               hpd->stats[pin].last_jiffies = jiffies;
+               hpd->stats[pin].count = 0;
+       }
+
+       hpd->stats[pin].count += increment;
+       if (hpd->stats[pin].count > threshold) {
+               hpd->stats[pin].state = HPD_MARK_DISABLED;
                DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
-               dev_priv->hotplug.stats[pin].count++;
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
-                             dev_priv->hotplug.stats[pin].count);
+                             hpd->stats[pin].count);
        }
 
        return storm;
 }
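
/*
 * Editor's note: a minimal, self-contained model of the storm accounting
 * described in the kernel-doc above, assuming the +10 (long) / +1 (short)
 * weighting and a threshold of 50 for illustration. All names below are
 * hypothetical stand-ins, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

struct pin_stats {
	unsigned long window_start;	/* stand-in for last_jiffies */
	int count;
};

static bool storm_detect(struct pin_stats *s, unsigned long now,
			 unsigned long period, int threshold, bool long_hpd)
{
	/* outside the detection window: start a fresh one */
	if (now < s->window_start || now >= s->window_start + period) {
		s->window_start = now;
		s->count = 0;
	}

	s->count += long_hpd ? 10 : 1;	/* one long IRQ weighs ten short ones */
	return s->count > threshold;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };
	int i;

	/* six long pulses in one window: the sixth crosses threshold 50 */
	for (i = 0; i < 6; i++)
		printf("pulse %d: storm=%d\n", i,
		       storm_detect(&s, 100, 1000, 50, true));
	return 0;
}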
 
-static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *intel_connector;
@@ -228,7 +245,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);
 
-                       if (intel_connector->encoder->hpd_pin == pin) {
+                       /* Don't check MST ports, they don't have pins */
+                       if (!intel_connector->mst_port &&
+                           intel_connector->encoder->hpd_pin == pin) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
@@ -346,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;
 
-       /* Disable hotplug on connectors that hit an irq storm. */
-       intel_hpd_irq_storm_disable(dev_priv);
+       /* Enable polling for connectors which had HPD IRQ storms */
+       intel_hpd_irq_storm_switch_to_polling(dev_priv);
 
        spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -395,37 +414,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
+       u32 long_hpd_pulse_mask = 0;
+       u32 short_hpd_pulse_mask = 0;
+       enum hpd_pin pin;
 
        if (!pin_mask)
                return;
 
        spin_lock(&dev_priv->irq_lock);
+
+       /*
+        * Determine whether ->hpd_pulse() exists for each pin, and
+        * whether we have a short or a long pulse. This is needed
+        * as each pin may have up to two encoders (HDMI and DP) and
+        * only one of them (DP) will have ->hpd_pulse().
+        */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               enum hpd_pin pin = encoder->hpd_pin;
                bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+               enum port port = encoder->port;
+               bool long_hpd;
 
+               pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;
 
-               if (has_hpd_pulse) {
-                       bool long_hpd = long_mask & BIT(pin);
-                       enum port port = encoder->port;
+               if (!has_hpd_pulse)
+                       continue;
 
-                       DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
-                                        long_hpd ? "long" : "short");
-                       /*
-                        * For long HPD pulses we want to have the digital queue happen,
-                        * but we still want HPD storm detection to function.
-                        */
-                       queue_dig = true;
-                       if (long_hpd) {
-                               dev_priv->hotplug.long_port_mask |= (1 << port);
-                       } else {
-                               /* for short HPD just trigger the digital queue */
-                               dev_priv->hotplug.short_port_mask |= (1 << port);
-                               continue;
-                       }
+               long_hpd = long_mask & BIT(pin);
+
+               DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+                                long_hpd ? "long" : "short");
+               queue_dig = true;
+
+               if (long_hpd) {
+                       long_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.long_port_mask |= BIT(port);
+               } else {
+                       short_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.short_port_mask |= BIT(port);
                }
+       }
+
+       /* Now process each pin just once */
+       for_each_hpd_pin(pin) {
+               bool long_hpd;
+
+               if (!(BIT(pin) & pin_mask))
+                       continue;
 
                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
@@ -442,17 +478,30 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;
 
-               if (!has_hpd_pulse) {
+               /*
+                * Delegate to ->hpd_pulse() if one of the encoders for this
+                * pin has it, otherwise let the hotplug_work deal with this
+                * pin directly.
+                */
+               if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+                       long_hpd = long_hpd_pulse_mask & BIT(pin);
+               } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
+                       long_hpd = true;
                        queue_hp = true;
                }
 
-               if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+               if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
+                       queue_hp = true;
                }
        }
 
+       /*
+        * Disable any IRQs that storms were detected on. Polling enablement
+        * happens later in our hotplug work.
+        */
        if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);
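
/*
 * Editor's note: the two-pass shape used in intel_hpd_irq_handler() above,
 * reduced to a toy — pass 1 classifies fired pins into long/short bitmasks,
 * pass 2 then acts on each pin exactly once. Pin numbers and masks are
 * made up for illustration.
 */
#include <stdio.h>

#define NUM_PINS 4

int main(void)
{
	unsigned int pin_mask = 0xA;	/* pins 1 and 3 fired */
	unsigned int long_mask = 0x8;	/* pin 3 was a long pulse */
	unsigned int long_pulse = 0, short_pulse = 0;
	int pin;

	/* pass 1: classify each fired pin */
	for (pin = 0; pin < NUM_PINS; pin++) {
		if (!(pin_mask & (1u << pin)))
			continue;
		if (long_mask & (1u << pin))
			long_pulse |= 1u << pin;
		else
			short_pulse |= 1u << pin;
	}
	printf("long=0x%x short=0x%x\n", long_pulse, short_pulse);

	/* pass 2: process each pin once, knowing its pulse type */
	for (pin = 0; pin < NUM_PINS; pin++) {
		if (!(pin_mask & (1u << pin)))
			continue;
		printf("pin %d: %s pulse\n", pin,
		       (long_pulse & (1u << pin)) ? "long" : "short");
	}
	return 0;
}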
index 37ef540dd280a2986942d4fb8688de39c4cc8e66..bc27b691d8248473abd207138817f10b55971751 100644 (file)
@@ -108,13 +108,14 @@ fail:
  * This function reads status register to verify if HuC
  * firmware was successfully loaded.
  *
- * Returns positive value if HuC firmware is loaded and verified
- * and -ENODEV if HuC is not present.
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded, and -ENODEV if HuC
+ * is not present on this platform.
  */
 int intel_huc_check_status(struct intel_huc *huc)
 {
        struct drm_i915_private *dev_priv = huc_to_i915(huc);
-       u32 status;
+       bool status;
 
        if (!HAS_HUC(dev_priv))
                return -ENODEV;
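
/*
 * Editor's note: a hedged sketch of how a caller might consume the
 * three-way return contract documented above (1 / 0 / -ENODEV). The
 * helper below is hypothetical; it is not part of the i915 API.
 */
#include <errno.h>
#include <stdio.h>

static const char *huc_status_str(int status)
{
	if (status == -ENODEV)
		return "HuC not present on this platform";
	return status ? "HuC firmware loaded and verified"
		      : "HuC firmware not loaded";
}

int main(void)
{
	printf("%s\n", huc_status_str(1));
	printf("%s\n", huc_status_str(0));
	printf("%s\n", huc_status_str(-ENODEV));
	return 0;
}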
index cdf19553ffacd28f1097bb2096b8cc35d4654b84..5d5336fbe7b05836b7bedc28bffbfef9e6b08b4f 100644 (file)
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
-}
 
+       dev_priv->lpe_audio.irq = -1;
+       dev_priv->lpe_audio.platdev = NULL;
+}
 
 /**
  * intel_lpe_audio_notify() - notify lpe audio event
index 43957bb37a42249cfb75793fd688f191eaef2c98..08fd9b12e4d7408b8e2c3d626f921aef7152d012 100644 (file)
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
        ce->lrc_desc = desc;
 }
 
-static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
-{
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_priolist *p;
-       struct rb_node **parent, *rb;
-       bool first = true;
-
-       if (unlikely(execlists->no_priolist))
-               prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
-       /* most positive priority is scheduled first, equal priorities fifo */
-       rb = NULL;
-       parent = &execlists->queue.rb_root.rb_node;
-       while (*parent) {
-               rb = *parent;
-               p = to_priolist(rb);
-               if (prio > p->priority) {
-                       parent = &rb->rb_left;
-               } else if (prio < p->priority) {
-                       parent = &rb->rb_right;
-                       first = false;
-               } else {
-                       return p;
-               }
-       }
-
-       if (prio == I915_PRIORITY_NORMAL) {
-               p = &execlists->default_priolist;
-       } else {
-               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
-               /* Convert an allocation failure to a priority bump */
-               if (unlikely(!p)) {
-                       prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
-                       /* To maintain ordering with all rendering, after an
-                        * allocation failure we have to disable all scheduling.
-                        * Requests will then be executed in fifo, and schedule
-                        * will ensure that dependencies are emitted in fifo.
-                        * There will be still some reordering with existing
-                        * requests, so if userspace lied about their
-                        * dependencies that reordering may be visible.
-                        */
-                       execlists->no_priolist = true;
-                       goto find_priolist;
-               }
-       }
-
-       p->priority = prio;
-       INIT_LIST_HEAD(&p->requests);
-       rb_link_node(&p->node, rb, parent);
-       rb_insert_color_cached(&p->node, &execlists->queue, first);
-
-       return p;
-}
-
 static void unwind_wa_tail(struct i915_request *rq)
 {
        rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
 
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
-       struct i915_request *rq, *rn;
-       struct i915_priolist *uninitialized_var(p);
-       int last_prio = I915_PRIORITY_INVALID;
+       struct i915_request *rq, *rn, *active = NULL;
+       struct list_head *uninitialized_var(pl);
+       int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
 
        lockdep_assert_held(&engine->timeline.lock);
 
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
                                         &engine->timeline.requests,
                                         link) {
                if (i915_request_completed(rq))
-                       return;
+                       break;
 
                __i915_request_unsubmit(rq);
                unwind_wa_tail(rq);
 
+               GEM_BUG_ON(rq->hw_context->active);
+
                GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
-               if (rq_prio(rq) != last_prio) {
-                       last_prio = rq_prio(rq);
-                       p = lookup_priolist(engine, last_prio);
+               if (rq_prio(rq) != prio) {
+                       prio = rq_prio(rq);
+                       pl = i915_sched_lookup_priolist(engine, prio);
                }
+               GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+               list_add(&rq->sched.link, pl);
 
-               GEM_BUG_ON(p->priority != rq_prio(rq));
-               list_add(&rq->sched.link, &p->requests);
+               active = rq;
+       }
+
+       /*
+        * The active request is now effectively the start of a new client
+        * stream, so give it the equivalent small priority bump to prevent
+        * it being gazumped a second time by another peer.
+        */
+       if (!(prio & I915_PRIORITY_NEWCLIENT)) {
+               prio |= I915_PRIORITY_NEWCLIENT;
+               list_move_tail(&active->sched.link,
+                              i915_sched_lookup_priolist(engine, prio));
        }
 }
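
/*
 * Editor's note: the "small priority bump" idea from the comment above,
 * as a standalone sketch — user priority in the high bits, internal
 * tie-break flags in the low bits, so a new-client bump wins ties among
 * equal user priorities without outranking a genuinely higher one. The
 * shift and flag values are illustrative, not i915's.
 */
#include <stdio.h>

#define USER_PRIO_SHIFT	1
#define PRIO_NEWCLIENT	(1 << 0)	/* hypothetical tie-break bit */

static int effective_prio(int user, int flags)
{
	return (user << USER_PRIO_SHIFT) | flags;
}

int main(void)
{
	int peer   = effective_prio(0, 0);
	int bumped = effective_prio(0, PRIO_NEWCLIENT);
	int higher = effective_prio(1, 0);

	/* bumped > peer, but still < higher */
	printf("peer=%d bumped=%d higher=%d\n", peer, bumped, higher);
	return 0;
}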
 
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 {
        struct intel_engine_cs *engine =
                container_of(execlists, typeof(*engine), execlists);
-       unsigned long flags;
-
-       spin_lock_irqsave(&engine->timeline.lock, flags);
 
        __unwind_incomplete_requests(engine);
-
-       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
 static inline void
 execlists_context_schedule_in(struct i915_request *rq)
 {
+       GEM_BUG_ON(rq->hw_context->active);
+
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
        intel_engine_context_in(rq->engine);
+       rq->hw_context->active = rq->engine;
 }
 
 static inline void
 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 {
+       rq->hw_context->active = NULL;
        intel_engine_context_out(rq->engine);
        execlists_context_status_change(rq, status);
        trace_i915_request_out(rq);
@@ -417,21 +374,32 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
+       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
        struct intel_context *ce = rq->hw_context;
-       struct i915_hw_ppgtt *ppgtt =
-               rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
        u32 *reg_state = ce->lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
-       /* True 32b PPGTT with dynamic page allocation: update PDP
+       /*
+        * True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
         */
-       if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
+       if (!i915_vm_is_48bit(&ppgtt->vm))
                execlists_update_context_pdps(ppgtt, reg_state);
 
+       /*
+        * Make sure the context image is complete before we submit it to HW.
+        *
+        * Ostensibly, writes (including the WCB) should be flushed prior to
+        * an uncached write such as our mmio register access, the empirical
+        * evidence (esp. on Braswell) suggests that the WC write into memory
+        * may not be visible to the HW prior to the completion of the UC
+        * register write and that we may begin execution from the context
+        * before its image is complete leading to invalid PD chasing.
+        */
+       wmb();
        return ce->lrc_desc;
 }
 
@@ -669,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
+               int i;
 
-               list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+               priolist_for_each_request_consume(rq, rn, p, i) {
                        /*
                         * Can we combine this request with the current port?
                         * It has to be the same context/ringbuffer and not
@@ -689,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * combine this request with the last, then we
                                 * are done.
                                 */
-                               if (port == last_port) {
-                                       __list_del_many(&p->requests,
-                                                       &rq->sched.link);
+                               if (port == last_port)
                                        goto done;
-                               }
 
                                /*
                                 * If GVT overrides us we only ever submit
@@ -703,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * request) to the second port.
                                 */
                                if (ctx_single_port_submission(last->hw_context) ||
-                                   ctx_single_port_submission(rq->hw_context)) {
-                                       __list_del_many(&p->requests,
-                                                       &rq->sched.link);
+                                   ctx_single_port_submission(rq->hw_context))
                                        goto done;
-                               }
 
                                GEM_BUG_ON(last->hw_context == rq->hw_context);
 
@@ -718,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                GEM_BUG_ON(port_isset(port));
                        }
 
-                       INIT_LIST_HEAD(&rq->sched.link);
+                       list_del_init(&rq->sched.link);
+
                        __i915_request_submit(rq);
                        trace_i915_request_in(rq, port_index(port, execlists));
+
                        last = rq;
                        submit = true;
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
@@ -861,16 +825,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
+               int i;
 
-               list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-                       INIT_LIST_HEAD(&rq->sched.link);
+               priolist_for_each_request_consume(rq, rn, p, i) {
+                       list_del_init(&rq->sched.link);
 
                        dma_fence_set_error(&rq->fence, -EIO);
                        __i915_request_submit(rq);
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
@@ -1076,13 +1040,7 @@ static void queue_request(struct intel_engine_cs *engine,
                          struct i915_sched_node *node,
                          int prio)
 {
-       list_add_tail(&node->link,
-                     &lookup_priolist(engine, prio)->requests);
-}
-
-static void __update_queue(struct intel_engine_cs *engine, int prio)
-{
-       engine->execlists.queue_priority = prio;
+       list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
 }
 
 static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1101,7 +1059,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
        if (prio > engine->execlists.queue_priority) {
-               __update_queue(engine, prio);
+               engine->execlists.queue_priority = prio;
                __submit_queue_imm(engine);
        }
 }
@@ -1124,139 +1082,6 @@ static void execlists_submit_request(struct i915_request *request)
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-static struct i915_request *sched_to_request(struct i915_sched_node *node)
-{
-       return container_of(node, struct i915_request, sched);
-}
-
-static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
-{
-       struct intel_engine_cs *engine = sched_to_request(node)->engine;
-
-       GEM_BUG_ON(!locked);
-
-       if (engine != locked) {
-               spin_unlock(&locked->timeline.lock);
-               spin_lock(&engine->timeline.lock);
-       }
-
-       return engine;
-}
-
-static void execlists_schedule(struct i915_request *request,
-                              const struct i915_sched_attr *attr)
-{
-       struct i915_priolist *uninitialized_var(pl);
-       struct intel_engine_cs *engine, *last;
-       struct i915_dependency *dep, *p;
-       struct i915_dependency stack;
-       const int prio = attr->priority;
-       LIST_HEAD(dfs);
-
-       GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-
-       if (i915_request_completed(request))
-               return;
-
-       if (prio <= READ_ONCE(request->sched.attr.priority))
-               return;
-
-       /* Need BKL in order to use the temporary link inside i915_dependency */
-       lockdep_assert_held(&request->i915->drm.struct_mutex);
-
-       stack.signaler = &request->sched;
-       list_add(&stack.dfs_link, &dfs);
-
-       /*
-        * Recursively bump all dependent priorities to match the new request.
-        *
-        * A naive approach would be to use recursion:
-        * static void update_priorities(struct i915_sched_node *node, prio) {
-        *      list_for_each_entry(dep, &node->signalers_list, signal_link)
-        *              update_priorities(dep->signal, prio)
-        *      queue_request(node);
-        * }
-        * but that may have unlimited recursion depth and so runs a very
-        * real risk of overunning the kernel stack. Instead, we build
-        * a flat list of all dependencies starting with the current request.
-        * As we walk the list of dependencies, we add all of its dependencies
-        * to the end of the list (this may include an already visited
-        * request) and continue to walk onwards onto the new dependencies. The
-        * end result is a topological list of requests in reverse order, the
-        * last element in the list is the request we must execute first.
-        */
-       list_for_each_entry(dep, &dfs, dfs_link) {
-               struct i915_sched_node *node = dep->signaler;
-
-               /*
-                * Within an engine, there can be no cycle, but we may
-                * refer to the same dependency chain multiple times
-                * (redundant dependencies are not eliminated) and across
-                * engines.
-                */
-               list_for_each_entry(p, &node->signalers_list, signal_link) {
-                       GEM_BUG_ON(p == dep); /* no cycles! */
-
-                       if (i915_sched_node_signaled(p->signaler))
-                               continue;
-
-                       GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
-                       if (prio > READ_ONCE(p->signaler->attr.priority))
-                               list_move_tail(&p->dfs_link, &dfs);
-               }
-       }
-
-       /*
-        * If we didn't need to bump any existing priorities, and we haven't
-        * yet submitted this request (i.e. there is no potential race with
-        * execlists_submit_request()), we can set our own priority and skip
-        * acquiring the engine locks.
-        */
-       if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
-               GEM_BUG_ON(!list_empty(&request->sched.link));
-               request->sched.attr = *attr;
-               if (stack.dfs_link.next == stack.dfs_link.prev)
-                       return;
-               __list_del_entry(&stack.dfs_link);
-       }
-
-       last = NULL;
-       engine = request->engine;
-       spin_lock_irq(&engine->timeline.lock);
-
-       /* Fifo and depth-first replacement ensure our deps execute before us */
-       list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-               struct i915_sched_node *node = dep->signaler;
-
-               INIT_LIST_HEAD(&dep->dfs_link);
-
-               engine = sched_lock_engine(node, engine);
-
-               if (prio <= node->attr.priority)
-                       continue;
-
-               node->attr.priority = prio;
-               if (!list_empty(&node->link)) {
-                       if (last != engine) {
-                               pl = lookup_priolist(engine, prio);
-                               last = engine;
-                       }
-                       GEM_BUG_ON(pl->priority != prio);
-                       list_move_tail(&node->link, &pl->requests);
-               }
-
-               if (prio > engine->execlists.queue_priority &&
-                   i915_sw_fence_done(&sched_to_request(node)->submit)) {
-                       /* defer submission until after all of our updates */
-                       __update_queue(engine, prio);
-                       tasklet_hi_schedule(&engine->execlists.tasklet);
-               }
-       }
-
-       spin_unlock_irq(&engine->timeline.lock);
-}
-
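
/*
 * Editor's note: the recursion-to-worklist pattern that the removed
 * comment describes (the scheduler itself now lives in i915_schedule()),
 * shown on a toy dependency graph. Each visited node appends its
 * signalers to a flat list instead of recursing, so stack depth stays
 * constant. Everything below is an illustrative model, not driver code.
 */
#include <stdio.h>

#define MAX_NODES 8

struct node {
	int prio;
	int nr_deps;
	struct node *deps[MAX_NODES];
};

static void bump_priorities(struct node *root, int prio)
{
	struct node *list[MAX_NODES * MAX_NODES];	/* ample for the toy graph */
	int head = 0, tail = 0, i;

	list[tail++] = root;
	while (head < tail) {
		struct node *n = list[head++];

		if (n->prio >= prio)
			continue;	/* already bumped: don't expand again */

		n->prio = prio;
		for (i = 0; i < n->nr_deps; i++)
			list[tail++] = n->deps[i];	/* flatten, don't recurse */
	}
}

int main(void)
{
	struct node a = { .prio = 0 }, b = { .prio = 0 }, c = { .prio = 0 };

	c.deps[c.nr_deps++] = &b;	/* c depends on b, b depends on a */
	b.deps[b.nr_deps++] = &a;

	bump_priorities(&c, 5);
	printf("a=%d b=%d c=%d\n", a.prio, b.prio, c.prio);	/* all 5 */
	return 0;
}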
 static void execlists_context_destroy(struct intel_context *ce)
 {
        GEM_BUG_ON(ce->pin_count);
@@ -1272,6 +1097,28 @@ static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+       struct intel_engine_cs *engine;
+
+       /*
+        * The tasklet may still be using a pointer to our state, via an
+        * old request. However, since we know we only unpin the context
+        * on retirement of the following request, we know that the last
+        * request referencing us will have had a completion CS interrupt.
+        * If we see that it is still active, it means that the tasklet hasn't
+        * had the chance to run yet; let it run before we teardown the
+        * reference it may use.
+        */
+       engine = READ_ONCE(ce->active);
+       if (unlikely(engine)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&engine->timeline.lock, flags);
+               process_csb(engine);
+               spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+               GEM_BUG_ON(READ_ONCE(ce->active));
+       }
+
        i915_gem_context_unpin_hw_id(ce->gem_context);
 
        intel_ring_unpin(ce->ring);
@@ -1375,6 +1222,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
        struct intel_context *ce = to_intel_context(ctx, engine);
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+       GEM_BUG_ON(!ctx->ppgtt);
 
        if (likely(ce->pin_count++))
                return ce;
@@ -1679,7 +1527,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        unsigned int i;
        int ret;
 
-       if (GEM_WARN_ON(engine->id != RCS))
+       if (GEM_DEBUG_WARN_ON(engine->id != RCS))
                return -EINVAL;
 
        switch (INTEL_GEN(engine->i915)) {
@@ -1718,8 +1566,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
         */
        for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
                wa_bb[i]->offset = batch_ptr - batch;
-               if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
-                                           CACHELINE_BYTES))) {
+               if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+                                                 CACHELINE_BYTES))) {
                        ret = -EINVAL;
                        break;
                }
@@ -1902,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
        unsigned long flags;
        u32 *regs;
 
-       GEM_TRACE("%s request global=%x, current=%d\n",
+       GEM_TRACE("%s request global=%d, current=%d\n",
                  engine->name, request ? request->global_seqno : 0,
                  intel_engine_get_seqno(engine));
 
@@ -2029,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
         * it is unsafe in case of lite-restore (because the ctx is
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
-       if (rq->gem_context->ppgtt &&
-           (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+       if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
            !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
            !intel_vgpu_active(rq->i915)) {
                ret = intel_logical_ring_emit_pdps(rq);
@@ -2109,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
 
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_INVALIDATE_TLB;
-               if (request->engine->id == VCS)
+               if (request->engine->class == VIDEO_DECODE_CLASS)
                        cmd |= MI_INVALIDATE_BSD;
        }
 
@@ -2294,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = execlists_submit_request;
        engine->cancel_requests = execlists_cancel_requests;
-       engine->schedule = execlists_schedule;
+       engine->schedule = i915_schedule;
        engine->execlists.tasklet.func = execlists_submission_tasklet;
 
        engine->reset.prepare = execlists_reset_prepare;
@@ -2632,7 +2479,6 @@ static void execlists_init_reg_state(u32 *regs,
                                     struct intel_ring *ring)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
        u32 base = engine->mmio_base;
        bool rcs = engine->class == RENDER_CLASS;
 
@@ -2704,12 +2550,12 @@ static void execlists_init_reg_state(u32 *regs,
        CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
        CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-       if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
+       if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
                 */
-               ASSIGN_CTX_PML4(ppgtt, regs);
+               ASSIGN_CTX_PML4(ctx->ppgtt, regs);
        }
 
        if (rcs) {
index 3e085c5f2b81bfa87daf161bd96e3eb01da5d4ac..96a8d9524b0c24445972e99c5a3bc958f379b739 100644 (file)
 #include <drm/drm_dp_dual_mode_helper.h>
 #include "intel_drv.h"
 
+/* LSPCON OUI Vendor IDs (signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define  LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define  LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define  LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
 static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
 {
        struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
        }
 }
 
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+       struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+       struct drm_dp_dpcd_ident *ident;
+       u32 vendor_oui;
+
+       if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+               DRM_ERROR("Can't read description\n");
+               return false;
+       }
+
+       ident = &dp->desc.ident;
+       vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+                     ident->oui[2];
+
+       switch (vendor_oui) {
+       case LSPCON_VENDOR_MCA_OUI:
+               lspcon->vendor = LSPCON_VENDOR_MCA;
+               DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+               break;
+
+       case LSPCON_VENDOR_PARADE_OUI:
+               lspcon->vendor = LSPCON_VENDOR_PARADE;
+               DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+               break;
+
+       default:
+               DRM_ERROR("Invalid/Unknown vendor OUI\n");
+               return false;
+       }
+
+       return true;
+}
+
 static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
 {
        enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
        return true;
 }
 
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+                           struct intel_crtc_state *crtc_state)
+{
+       const struct drm_display_info *info = &connector->display_info;
+       const struct drm_display_mode *adjusted_mode =
+                                       &crtc_state->base.adjusted_mode;
+
+       if (drm_mode_is_420_only(info, adjusted_mode) &&
+           connector->ycbcr_420_allowed) {
+               crtc_state->port_clock /= 2;
+               crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+               crtc_state->lspcon_downsampling = true;
+       }
+}
+
 static bool lspcon_probe(struct intel_lspcon *lspcon)
 {
        int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
        /* Yay ... got a LSPCON device */
        DRM_DEBUG_KMS("LSPCON detected\n");
        lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
-       lspcon->active = true;
+
+       /*
+        * In the SW state machine, let's put LSPCON in PCON mode only.
+        * That way it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+        */
+       if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+               if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+                       DRM_ERROR("LSPCON mode change to PCON failed\n");
+                       return false;
+               }
+       }
        return true;
 }
 
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
        DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
 }
 
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+       u8 avi_if_ctrl;
+       u8 retry;
+       ssize_t ret;
+
+       /* Check if LSPCON FW is ready for data */
+       for (retry = 0; retry < 5; retry++) {
+               if (retry)
+                       usleep_range(200, 300);
+
+               ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+                                      &avi_if_ctrl, 1);
+               if (ret < 0) {
+                       DRM_ERROR("Failed to read AVI IF control\n");
+                       return false;
+               }
+
+               if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+                       return true;
+       }
+
+       DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+       return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+                                                 uint8_t *avi_buf)
+{
+       u8 avi_if_ctrl;
+       u8 block_count = 0;
+       u8 *data;
+       uint16_t reg;
+       ssize_t ret;
+
+       while (block_count < 4) {
+               if (!lspcon_parade_fw_ready(aux)) {
+                       DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
+                                     block_count);
+                       return false;
+               }
+
+               reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+               data = avi_buf + block_count * 8;
+               ret = drm_dp_dpcd_write(aux, reg, data, 8);
+               if (ret < 0) {
+                       DRM_ERROR("Failed to write AVI IF block %d\n",
+                                 block_count);
+                       return false;
+               }
+
+               /*
+                * Once a block of data is written, we have to inform the FW
+                * about this by writing into avi infoframe control register:
+                * - set the kickoff bit[7] to 1
+                * - write the block no. to bits[1:0]
+                */
+               reg = LSPCON_PARADE_AVI_IF_CTRL;
+               avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+               ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+               if (ret < 0) {
+                       DRM_ERROR("Failed to update (0x%x), block %d\n",
+                                 reg, block_count);
+                       return false;
+               }
+
+               block_count++;
+       }
+
+       DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+       return true;
+}
+
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+                                              const uint8_t *frame,
+                                              ssize_t len)
+{
+       uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+       /*
+        * Parade's AVI IF buffer contains 32 bytes of data, sent as
+        * 4 blocks of 8 bytes:
+        *      Token byte (first byte of first block, must be non-zero)
+        *      HB0 to HB2 from AVI IF (3 byte header)
+        *      PB0 to PB27 from AVI IF (28 bytes of data)
+        * So it should look like this:
+        *      first block: | <token> <HB0-HB2> <DB0-DB3> |
+        *      next 3 blocks: |<DB4-DB11>|<DB12-DB19>|<DB20-DB28>|
+        */
+
+       if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+               DRM_ERROR("Invalid length of infoframes\n");
+               return false;
+       }
+
+       memcpy(&avi_if[1], frame, len);
+
+       if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+               DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+               return false;
+       }
+
+       return true;
+}
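
/*
 * Editor's note: a sketch of building the 32-byte Parade buffer laid out
 * in the comment above — one non-zero token byte followed by the 31-byte
 * AVI infoframe (3 header + 28 payload bytes), written out as 4 blocks
 * of 8 bytes. No DPCD access is modelled; the frame contents are dummy.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char frame[31];		/* HB0-HB2 + PB0-PB27 */
	unsigned char buf[32] = { 1 };		/* token byte must be non-zero */
	int block;

	memset(frame, 0xab, sizeof(frame));	/* dummy infoframe bytes */
	memcpy(&buf[1], frame, sizeof(frame));

	for (block = 0; block < 4; block++)
		printf("block %d: bytes %d-%d, first byte 0x%02x\n",
		       block, block * 8, block * 8 + 7, buf[block * 8]);
	return 0;
}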
+
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+                                           const uint8_t *buffer, ssize_t len)
+{
+       int ret;
+       uint32_t val = 0;
+       uint32_t retry;
+       uint16_t reg;
+       const uint8_t *data = buffer;
+
+       reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+       while (val < len) {
+               /* DPCD write for AVI IF can fail on a slow FW day, so retry */
+               for (retry = 0; retry < 5; retry++) {
+                       ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+                       if (ret == 1) {
+                               break;
+                       } else if (retry < 4) {
+                               mdelay(50);
+                               continue;
+                       } else {
+                               DRM_ERROR("DPCD write failed at:0x%x\n", reg);
+                               return false;
+                       }
+               }
+               val++;
+               reg++;
+               data++;
+       }
+
+       val = 0;
+       reg = LSPCON_MCA_AVI_IF_CTRL;
+       ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+       if (ret < 0) {
+               DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+               return false;
+       }
+
+       /* Indicate LSPCON chip about infoframe, clear bit 1 and set bit 0 */
+       val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+       val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+       ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+       if (ret < 0) {
+               DRM_ERROR("DPCD write failed, address 0x%x\n", reg);
+               return false;
+       }
+
+       val = 0;
+       ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+       if (ret < 0) {
+               DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+               return false;
+       }
+
+       if (val == LSPCON_MCA_AVI_IF_HANDLED)
+               DRM_DEBUG_KMS("AVI IF handled by FW\n");
+
+       return true;
+}
+
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state,
+                           unsigned int type,
+                           const void *frame, ssize_t len)
+{
+       bool ret;
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+
+       /* LSPCON only needs AVI IF */
+       if (type != HDMI_INFOFRAME_TYPE_AVI)
+               return;
+
+       if (lspcon->vendor == LSPCON_VENDOR_MCA)
+               ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+                                                     frame, len);
+       else
+               ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+                                                        frame, len);
+
+       if (!ret) {
+               DRM_ERROR("Failed to write AVI infoframes\n");
+               return;
+       }
+
+       DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+                          bool enable,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state)
+{
+       ssize_t ret;
+       union hdmi_infoframe frame;
+       uint8_t buf[VIDEO_DIP_DATA_SIZE];
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+       struct intel_lspcon *lspcon = &dig_port->lspcon;
+       struct intel_dp *intel_dp = &dig_port->dp;
+       struct drm_connector *connector = &intel_dp->attached_connector->base;
+       const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
+       bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+
+       if (!lspcon->active) {
+               DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
+               return;
+       }
+
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                      mode, is_hdmi2_sink);
+       if (ret < 0) {
+               DRM_ERROR("couldn't fill AVI infoframe\n");
+               return;
+       }
+
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+               if (crtc_state->lspcon_downsampling)
+                       frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+               else
+                       frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+       } else {
+               frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+       }
+
+       drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+                                          crtc_state->limited_color_range ?
+                                          HDMI_QUANTIZATION_RANGE_LIMITED :
+                                          HDMI_QUANTIZATION_RANGE_FULL,
+                                          false, is_hdmi2_sink);
+
+       ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+       if (ret < 0) {
+               DRM_ERROR("Failed to pack AVI IF\n");
+               return;
+       }
+
+       dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+                                 buf, ret);
+}
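
The colorspace selection above reduces to a small pure function. A sketch, with the enum values as simplified stand-ins for the HDMI_COLORSPACE_* constants used by the real code:

/* Sketch only: stand-ins for the HDMI_COLORSPACE_* constants. */
#include <stdbool.h>

enum sketch_colorspace { CS_RGB, CS_YUV420, CS_YUV444 };

static enum sketch_colorspace
pick_colorspace(bool output_is_ycbcr444, bool lspcon_downsampling)
{
        if (!output_is_ycbcr444)
                return CS_RGB;

        /* the LSPCON downsamples 4:4:4 to 4:2:0 on its HDMI side */
        return lspcon_downsampling ? CS_YUV420 : CS_YUV444;
}
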
+
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *pipe_config)
+{
+       return enc_to_intel_lspcon(&encoder->base)->active;
+}
+
 void lspcon_resume(struct intel_lspcon *lspcon)
 {
        enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
        struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_connector *connector = &dp->attached_connector->base;
 
        if (!HAS_LSPCON(dev_priv)) {
                DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
                return false;
        }
 
-       /*
-       * In the SW state machine, lets Put LSPCON in PCON mode only.
-       * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
-       * 2.0 sinks.
-       */
-       if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
-               if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
-                       DRM_ERROR("LSPCON mode change to PCON failed\n");
-                       return false;
-               }
-       }
-
        if (!intel_dp_read_dpcd(dp)) {
                DRM_ERROR("LSPCON DPCD read failed\n");
                return false;
        }
 
-       drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
+       if (!lspcon_detect_vendor(lspcon)) {
+               DRM_ERROR("LSPCON vendor detection failed\n");
+               return false;
+       }
 
+       connector->ycbcr_420_allowed = true;
+       lspcon->active = true;
        DRM_DEBUG_KMS("Success: LSPCON init\n");
        return true;
 }
index f9f3b0885ba595be9dad319ee78060c0ec731a0c..e6c5d985ea0afd9d6f8eadb45228dad3ee6323f0 100644 (file)
 #include <linux/acpi.h>
 
 /* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
-       struct intel_connector base;
-};
-
 struct intel_lvds_pps {
        /* 100us units */
        int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
        struct intel_lvds_pps init_pps;
        u32 init_lvds_val;
 
-       struct intel_lvds_connector *attached_connector;
+       struct intel_connector *attached_connector;
 };
 
 static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
        return container_of(encoder, struct intel_lvds_encoder, base.base);
 }
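
to_lvds_encoder() above uses container_of() to step from an embedded member back to its containing structure; a self-contained sketch of that pattern:

/* Sketch only: container_of() rewritten with offsetof() for illustration. */
#include <stddef.h>

struct base { int x; };
struct derived { int y; struct base base; };

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct derived *to_derived(struct base *b)
{
        return container_of_sketch(b, struct derived, base);
}
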
 
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
-{
-       return container_of(connector, struct intel_lvds_connector, base.base);
-}
-
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
                             i915_reg_t lvds_reg, enum pipe *pipe)
 {
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        struct intel_lvds_encoder *lvds_encoder =
                to_lvds_encoder(&intel_encoder->base);
        struct intel_connector *intel_connector =
-               &lvds_encoder->attached_connector->base;
+               lvds_encoder->attached_connector;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
                pipe_config->pipe_bpp = lvds_bpp;
        }
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
        /*
         * We have timings from the BIOS for the panel, put them in
         * to the adjusted mode.  The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-       struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;
 
        /* use cached edid if we have one */
-       if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
-               return drm_add_edid_modes(connector, lvds_connector->base.edid);
+       if (!IS_ERR_OR_NULL(intel_connector->edid))
+               return drm_add_edid_modes(connector, intel_connector->edid);
 
-       mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+       mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
        if (mode == NULL)
                return 0;
 
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
        return 1;
 }
 
-/**
- * intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
-static void intel_lvds_destroy(struct drm_connector *connector)
-{
-       struct intel_lvds_connector *lvds_connector =
-               to_lvds_connector(connector);
-
-       if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
-               kfree(lvds_connector->base.edid);
-
-       intel_panel_fini(&lvds_connector->base.panel);
-
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
        .get_modes = intel_lvds_get_modes,
        .mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
        .atomic_set_property = intel_digital_connector_atomic_set_property,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_lvds_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
                return i915_modparams.lvds_channel_mode == 2;
 
        /* single channel LVDS is limited to 112 MHz */
-       if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
-           > 112999)
+       if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
                return true;
 
        if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        struct drm_device *dev = &dev_priv->drm;
        struct intel_lvds_encoder *lvds_encoder;
        struct intel_encoder *intel_encoder;
-       struct intel_lvds_connector *lvds_connector;
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        if (!lvds_encoder)
                return;
 
-       lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
-       if (!lvds_connector) {
-               kfree(lvds_encoder);
-               return;
-       }
-
-       if (intel_connector_init(&lvds_connector->base) < 0) {
-               kfree(lvds_connector);
+       intel_connector = intel_connector_alloc();
+       if (!intel_connector) {
                kfree(lvds_encoder);
                return;
        }
 
-       lvds_encoder->attached_connector = lvds_connector;
+       lvds_encoder->attached_connector = intel_connector;
 
        intel_encoder = &lvds_encoder->base;
        encoder = &intel_encoder->base;
-       intel_connector = &lvds_connector->base;
        connector = &intel_connector->base;
        drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        } else {
                edid = ERR_PTR(-ENOENT);
        }
-       lvds_connector->base.edid = edid;
+       intel_connector->edid = edid;
 
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
        drm_connector_cleanup(connector);
        drm_encoder_cleanup(encoder);
        kfree(lvds_encoder);
-       kfree(lvds_connector);
+       intel_connector_free(intel_connector);
        return;
 }
index e034b4166d322f8182a9f31bcfb1cd224c686214..b8f106d9ecf8b1be6286b2dfaeac1c5188629528 100644 (file)
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
                opregion->acpi->cadl[i] = 0;
 }
 
-void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-
-       if (!opregion->header)
-               return;
-
-       if (opregion->acpi) {
-               intel_didl_outputs(dev_priv);
-               intel_setup_cadls(dev_priv);
-
-               /* Notify BIOS we are ready to handle ACPI video ext notifs.
-                * Right now, all the events are handled by the ACPI video module.
-                * We don't actually need to do anything with them. */
-               opregion->acpi->csts = 0;
-               opregion->acpi->drdy = 1;
-
-               opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
-               register_acpi_notifier(&opregion->acpi_notifier);
-       }
-
-       if (opregion->asle) {
-               opregion->asle->tche = ASLE_TCHE_BLC_EN;
-               opregion->asle->ardy = ASLE_ARDY_READY;
-       }
-}
-
-void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-
-       if (!opregion->header)
-               return;
-
-       if (opregion->asle)
-               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
-       cancel_work_sync(&dev_priv->opregion.asle_work);
-
-       if (opregion->acpi) {
-               opregion->acpi->drdy = 0;
-
-               unregister_acpi_notifier(&opregion->acpi_notifier);
-               opregion->acpi_notifier.notifier_call = NULL;
-       }
-
-       /* just clear all opregion memory pointers now */
-       memunmap(opregion->header);
-       if (opregion->rvda) {
-               memunmap(opregion->rvda);
-               opregion->rvda = NULL;
-       }
-       if (opregion->vbt_firmware) {
-               kfree(opregion->vbt_firmware);
-               opregion->vbt_firmware = NULL;
-       }
-       opregion->header = NULL;
-       opregion->acpi = NULL;
-       opregion->swsci = NULL;
-       opregion->asle = NULL;
-       opregion->vbt = NULL;
-       opregion->lid_state = NULL;
-}
-
 static void swsci_setup(struct drm_i915_private *dev_priv)
 {
        struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 
        return ret - 1;
 }
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               opregion->acpi_notifier.notifier_call =
+                       intel_opregion_video_event;
+               register_acpi_notifier(&opregion->acpi_notifier);
+       }
+
+       intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               intel_didl_outputs(i915);
+               intel_setup_cadls(i915);
+
+               /*
+                * Notify BIOS we are ready to handle ACPI video ext notifs.
+                * Right now, all the events are handled by the ACPI video
+                * module. We don't actually need to do anything with them.
+                */
+               opregion->acpi->csts = 0;
+               opregion->acpi->drdy = 1;
+       }
+
+       if (opregion->asle) {
+               opregion->asle->tche = ASLE_TCHE_BLC_EN;
+               opregion->asle->ardy = ASLE_ARDY_READY;
+       }
+
+       intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       intel_opregion_notify_adapter(i915, state);
+
+       if (opregion->asle)
+               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+       cancel_work_sync(&i915->opregion.asle_work);
+
+       if (opregion->acpi)
+               opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       intel_opregion_suspend(i915, PCI_D1);
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi_notifier.notifier_call) {
+               unregister_acpi_notifier(&opregion->acpi_notifier);
+               opregion->acpi_notifier.notifier_call = NULL;
+       }
+
+       /* just clear all opregion memory pointers now */
+       memunmap(opregion->header);
+       if (opregion->rvda) {
+               memunmap(opregion->rvda);
+               opregion->rvda = NULL;
+       }
+       if (opregion->vbt_firmware) {
+               kfree(opregion->vbt_firmware);
+               opregion->vbt_firmware = NULL;
+       }
+       opregion->header = NULL;
+       opregion->acpi = NULL;
+       opregion->swsci = NULL;
+       opregion->asle = NULL;
+       opregion->vbt = NULL;
+       opregion->lid_state = NULL;
+}
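
The refactor above layers one-time register/unregister on top of the reusable resume/suspend paths: register installs the ACPI notifier and then runs the resume path, while unregister runs the suspend path before tearing everything down. A self-contained sketch of that layering, with printf() stand-ins for the real bodies:

/* Sketch only: stand-in functions modelling the call layering. */
#include <stdio.h>

static void opregion_resume(void)  { printf("resume: drdy=1, notify D0\n"); }
static void opregion_suspend(void) { printf("suspend: drdy=0, notify Dx\n"); }

static void opregion_register(void)
{
        printf("register: install ACPI notifier\n");
        opregion_resume();              /* shared with runtime resume */
}

static void opregion_unregister(void)
{
        opregion_suspend();             /* shared with runtime suspend */
        printf("unregister: remove ACPI notifier, clear pointers\n");
}

int main(void)
{
        opregion_register();
        opregion_suspend();             /* a runtime PM cycle */
        opregion_resume();
        opregion_unregister();
        return 0;
}
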
index e8498a8cda3d1c9850f386bb00bfef90622fa12c..d84b6d2d2faee58230a926164443c1f14ffe06b5 100644 (file)
@@ -57,8 +57,14 @@ struct intel_opregion {
 #ifdef CONFIG_ACPI
 
 int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
 void intel_opregion_register(struct drm_i915_private *dev_priv);
 void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+                           pci_power_t state);
+
 void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                  bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
 {
 }
 
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+                                          pci_power_t state)
+{
+}
+
 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 }
index 72eb7e48e8bc0c8f0e721f4bbfe3acc458b33ed5..20ea7c99d13a06e73a52b8d5b69866b8f35a32ad 100644 (file)
@@ -1338,7 +1338,7 @@ err_put_bo:
        return err;
 }
 
-void intel_setup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
        int ret;
@@ -1387,7 +1387,7 @@ out_free:
        kfree(overlay);
 }
 
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
 
index 4a9f139e7b7383c8f244bac833679c3b5302214b..e6cd7b55c0182425cb7eb3bc32c9250d8cf7f601 100644 (file)
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
        /* Native modes don't need fitting */
        if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
            adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
-           !pipe_config->ycbcr420)
+           pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
                goto done;
 
        switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
 static u32 vlv_get_backlight(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       enum pipe pipe = intel_connector_get_pipe(connector);
 
        return _vlv_get_backlight(dev_priv, pipe);
 }
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
        struct intel_panel *panel = &connector->panel;
 
        /* Disable the backlight */
-       pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+       intel_panel_actually_set_backlight(old_conn_state, 0);
        usleep_range(2000, 3000);
        pwm_disable(panel->backlight.pwm);
 }
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
        return 0;
 }
 
-void intel_panel_destroy_backlight(struct drm_connector *connector)
+static void intel_panel_destroy_backlight(struct intel_panel *panel)
 {
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       struct intel_panel *panel = &intel_connector->panel;
-
        /* dispose of the pwm */
        if (panel->backlight.pwm)
                pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
        struct intel_connector *intel_connector =
                container_of(panel, struct intel_connector, panel);
 
+       intel_panel_destroy_backlight(panel);
+
        if (panel->fixed_mode)
                drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
 
index 1db9b8328275038f93661c0e743bc0598303d25b..897a791662c59c60adfdc4774b2c52f3e95ef4bb 100644 (file)
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
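+       /* mem_value == 0 means the level is disabled; U32_MAX ensures it is never used */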
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 {
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
                 * any underrun. If not able to get Dimm info assume 16GB dimm
                 * to avoid any underrun.
                 */
-               if (!dev_priv->dram_info.valid_dimm ||
-                   dev_priv->dram_info.is_16gb_dimm)
+               if (dev_priv->dram_info.is_16gb_dimm)
                        wm[0] += 1;
 
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+       /*
+        * On some SNB machines (Thinkpad X220 Tablet at least)
+        * LP3 usage can cause vblank interrupts to be lost.
+        * The DEIIR bit will go high but it looks like the CPU
+        * never gets interrupted.
+        *
+        * It's not clear whether other interrupt sources could
+        * be affected or whether this is somehow limited to
+        * vblank interrupts only. To play it safe we disable LP3
+        * watermarks entirely.
+        */
+       if (dev_priv->wm.pri_latency[3] == 0 &&
+           dev_priv->wm.spr_latency[3] == 0 &&
+           dev_priv->wm.cur_latency[3] == 0)
+               return;
+
+       dev_priv->wm.pri_latency[3] = 0;
+       dev_priv->wm.spr_latency[3] = 0;
+       dev_priv->wm.cur_latency[3] = 0;
+
+       DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+       intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
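
This quirk cooperates with the mem_value == 0 guards added to the ilk_compute_*_wm() helpers earlier in this patch: zeroing a level's latencies is the mechanism that disables it. A sketch of the interplay, with the multiplication standing in for the real method1/method2 math:

/* Sketch only: the arithmetic is a stand-in for the real WM formulas. */
#include <stdint.h>

#define U32_MAX_SKETCH 0xffffffffu

static uint32_t compute_wm_sketch(uint16_t latency_us)
{
        if (latency_us == 0)            /* level disabled, e.g. by the LP3 quirk */
                return U32_MAX_SKETCH;

        return latency_us * 8u;         /* stand-in for the real calculation */
}
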
+
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
        intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN6(dev_priv)) {
                snb_wm_latency_quirk(dev_priv);
+               snb_wm_lp3_irq_quirk(dev_priv);
+       }
 }
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
         * and after the vblank.
         */
        *a = newstate->wm.ilk.optimal;
-       if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+       if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+           intel_state->skip_intermediate_wm)
                return 0;
 
        a->pipe_enabled |= b->pipe_enabled;
@@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
-       if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
-           IS_CANNONLAKE(dev_priv))
-               return true;
-
-       if (IS_SKYLAKE(dev_priv) &&
-           dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
-               return true;
-
-       return false;
+       return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+               dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 }
 
 /*
@@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
                              const struct intel_crtc_state *cstate,
-                             const unsigned int total_data_rate,
+                             const u64 total_data_rate,
                              const int num_active,
                              struct skl_ddb_allocation *ddb)
 {
@@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
                return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
        adjusted_mode = &cstate->base.adjusted_mode;
-       total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+       total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
        /*
         * 12GB/s is maximum BW supported by single DBuf slice.
         */
-       if (total_data_bw >= GBps(12) || num_active > 1) {
+       if (num_active > 1 || total_data_bw >= GBps(12)) {
                ddb->enabled_slices = 2;
        } else {
                ddb->enabled_slices = 1;
@@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 }
 
 static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
                                   const struct intel_crtc_state *cstate,
-                                  const unsigned int total_data_rate,
+                                  const u64 total_data_rate,
                                   struct skl_ddb_allocation *ddb,
                                   struct skl_ddb_entry *alloc, /* out */
                                   int *num_active /* out */)
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *for_crtc = cstate->base.crtc;
        const struct drm_crtc_state *crtc_state;
        const struct drm_crtc *crtc;
@@ -3945,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
                                      val & PLANE_CTL_ALPHA_MASK);
 
        val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-       /*
-        * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
-        * registers for now.
-        */
-       if (INTEL_GEN(dev_priv) < 11)
+       if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
                val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-       if (fourcc == DRM_FORMAT_NV12) {
                skl_ddb_entry_init_from_hw(dev_priv,
                                           &ddb->plane[pipe][plane_id], val2);
                skl_ddb_entry_init_from_hw(dev_priv,
@@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
        return 0;
 }
 
-static unsigned int
+static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
-                            const struct drm_plane_state *pstate,
+                            const struct intel_plane_state *intel_pstate,
                             const int plane)
 {
-       struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
-       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+       struct intel_plane *intel_plane =
+               to_intel_plane(intel_pstate->base.plane);
        uint32_t data_rate;
        uint32_t width = 0, height = 0;
        struct drm_framebuffer *fb;
        u32 format;
        uint_fixed_16_16_t down_scale_amount;
+       u64 rate;
 
        if (!intel_pstate->base.visible)
                return 0;
 
-       fb = pstate->fb;
+       fb = intel_pstate->base.fb;
        format = fb->format->format;
 
        if (intel_plane->id == PLANE_CURSOR)
@@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                height /= 2;
        }
 
-       data_rate = width * height * fb->format->cpp[plane];
+       data_rate = width * height;
 
        down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
 
-       return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+       rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+       rate *= fb->format->cpp[plane];
+       return rate;
 }
 
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- *   3 * 4096 * 8192  * 4 < 2^32
- */
-static unsigned int
+static u64
 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
-                                unsigned int *plane_data_rate,
-                                unsigned int *uv_plane_data_rate)
+                                u64 *plane_data_rate,
+                                u64 *uv_plane_data_rate)
 {
        struct drm_crtc_state *cstate = &intel_cstate->base;
        struct drm_atomic_state *state = cstate->state;
        struct drm_plane *plane;
        const struct drm_plane_state *pstate;
-       unsigned int total_data_rate = 0;
+       u64 total_data_rate = 0;
 
        if (WARN_ON(!state))
                return 0;
@@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
        /* Calculate and cache data rate for each plane */
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
                enum plane_id plane_id = to_intel_plane(plane)->id;
-               unsigned int rate;
+               u64 rate;
+               const struct intel_plane_state *intel_pstate =
+                       to_intel_plane_state(pstate);
 
                /* packed/y */
                rate = skl_plane_relative_data_rate(intel_cstate,
-                                                   pstate, 0);
+                                                   intel_pstate, 0);
                plane_data_rate[plane_id] = rate;
-
                total_data_rate += rate;
 
                /* uv-plane */
                rate = skl_plane_relative_data_rate(intel_cstate,
-                                                   pstate, 1);
+                                                   intel_pstate, 1);
                uv_plane_data_rate[plane_id] = rate;
-
                total_data_rate += rate;
        }
 
        return total_data_rate;
 }
 
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+                                u64 *plane_data_rate)
+{
+       struct drm_crtc_state *cstate = &intel_cstate->base;
+       struct drm_atomic_state *state = cstate->state;
+       struct drm_plane *plane;
+       const struct drm_plane_state *pstate;
+       u64 total_data_rate = 0;
+
+       if (WARN_ON(!state))
+               return 0;
+
+       /* Calculate and cache data rate for each plane */
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+               const struct intel_plane_state *intel_pstate =
+                       to_intel_plane_state(pstate);
+               enum plane_id plane_id = to_intel_plane(plane)->id;
+               u64 rate;
+
+               if (!intel_pstate->linked_plane) {
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 0);
+                       plane_data_rate[plane_id] = rate;
+                       total_data_rate += rate;
+               } else {
+                       enum plane_id y_plane_id;
+
+                       /*
+                        * The slave plane might not be iterated by
+                        * drm_atomic_crtc_state_for_each_plane_state(),
+                        * and it needs the master plane state, which may
+                        * be NULL if we try get_new_plane_state(), so we
+                        * always calculate from the master.
+                        */
+                       if (intel_pstate->slave)
+                               continue;
+
+                       /* Y plane rate is calculated on the slave */
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 0);
+                       y_plane_id = intel_pstate->linked_plane->id;
+                       plane_data_rate[y_plane_id] = rate;
+                       total_data_rate += rate;
+
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 1);
+                       plane_data_rate[plane_id] = rate;
+                       total_data_rate += rate;
+               }
+       }
+
+       return total_data_rate;
+}
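
The master/slave bookkeeping above can be hard to follow. A simplified sketch (not part of the patch), with sketch_plane standing in for intel_plane_state and rate_of() for skl_plane_relative_data_rate(): slaves are skipped, and the iterated master attributes the Y rate to its linked (slave) plane and the UV rate to itself.

/* Sketch only: simplified stand-ins for the i915 structures. */
#include <stdbool.h>
#include <stdint.h>

struct sketch_plane {
        int id;
        bool slave;                        /* skipped: master accounts for it */
        const struct sketch_plane *linked; /* slave Y plane linked to this UV master */
};

static uint64_t total_rate(const struct sketch_plane *planes, int n,
                           uint64_t (*rate_of)(const struct sketch_plane *p,
                                               int color_plane),
                           uint64_t *per_plane)
{
        uint64_t total = 0;
        int i;

        for (i = 0; i < n; i++) {
                const struct sketch_plane *p = &planes[i];

                if (p->slave)                   /* counted via its master */
                        continue;

                if (p->linked) {                /* Y rate lands on the slave's id */
                        per_plane[p->linked->id] = rate_of(p, 0);
                        total += per_plane[p->linked->id];
                        per_plane[p->id] = rate_of(p, 1); /* UV rate */
                } else {
                        per_plane[p->id] = rate_of(p, 0);
                }
                total += per_plane[p->id];
        }

        return total;
}
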
+
 static uint16_t
 skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
 {
@@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
 
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
                enum plane_id plane_id = to_intel_plane(plane)->id;
+               struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
 
                if (plane_id == PLANE_CURSOR)
                        continue;
 
-               if (!pstate->visible)
+               /* slave plane must be invisible and calculated from master */
+               if (!pstate->visible || WARN_ON(plane_state->slave))
                        continue;
 
-               minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-               uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               if (!plane_state->linked_plane) {
+                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+                       uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               } else {
+                       enum plane_id y_plane_id =
+                               plane_state->linked_plane->id;
+
+                       minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               }
        }
 
        minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4318,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct drm_crtc *crtc = cstate->base.crtc;
-       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
        uint16_t alloc_size, start;
        uint16_t minimum[I915_MAX_PLANES] = {};
        uint16_t uv_minimum[I915_MAX_PLANES] = {};
-       unsigned int total_data_rate;
+       u64 total_data_rate;
        enum plane_id plane_id;
        int num_active;
-       unsigned int plane_data_rate[I915_MAX_PLANES] = {};
-       unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+       u64 plane_data_rate[I915_MAX_PLANES] = {};
+       u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
        uint16_t total_min_blocks = 0;
 
        /* Clear the partitioning for disabled planes. */
@@ -4344,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                return 0;
        }
 
-       total_data_rate = skl_get_total_relative_data_rate(cstate,
-                                                          plane_data_rate,
-                                                          uv_plane_data_rate);
-       skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
-                                          alloc, &num_active);
+       if (INTEL_GEN(dev_priv) < 11)
+               total_data_rate =
+                       skl_get_total_relative_data_rate(cstate,
+                                                        plane_data_rate,
+                                                        uv_plane_data_rate);
+       else
+               total_data_rate =
+                       icl_get_total_relative_data_rate(cstate,
+                                                        plane_data_rate);
+
+       skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+                                          ddb, alloc, &num_active);
        alloc_size = skl_ddb_entry_size(alloc);
        if (alloc_size == 0)
                return 0;
@@ -4388,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
        start = alloc->start;
        for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-               unsigned int data_rate, uv_data_rate;
+               u64 data_rate, uv_data_rate;
                uint16_t plane_blocks, uv_plane_blocks;
 
                if (plane_id == PLANE_CURSOR)
@@ -4402,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                 * result is < available as data_rate / total_data_rate < 1
                 */
                plane_blocks = minimum[plane_id];
-               plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
-                                       total_data_rate);
+               plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
 
                /* Leave disabled planes at (0,0) */
                if (data_rate) {
@@ -4417,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uv_data_rate = uv_plane_data_rate[plane_id];
 
                uv_plane_blocks = uv_minimum[plane_id];
-               uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
-                                          total_data_rate);
+               uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+               /* Gen11+ uses a separate plane for UV watermarks */
+               WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
                if (uv_data_rate) {
                        ddb->uv_plane[pipe][plane_id].start = start;
@@ -4476,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 }
 
 static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
 {
        uint32_t pixel_rate;
        uint32_t crtc_htotal;
@@ -4520,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 
 static int
 skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-                           struct intel_crtc_state *cstate,
+                           const struct intel_crtc_state *cstate,
                            const struct intel_plane_state *intel_pstate,
                            struct skl_wm_params *wp, int plane_id)
 {
@@ -4627,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 }
 
 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-                               struct intel_crtc_state *cstate,
+                               const struct intel_crtc_state *cstate,
                                const struct intel_plane_state *intel_pstate,
                                uint16_t ddb_allocation,
                                int level,
@@ -4672,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
        } else {
                if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
                     wp->dbuf_block_size < 1) &&
-                    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+                    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
                        selected_result = method2;
-               else if (ddb_allocation >=
-                        fixed16_to_u32_round_up(wp->plane_blocks_per_line))
-                       selected_result = min_fixed16(method1, method2);
-               else if (latency >= wp->linetime_us)
-                       selected_result = min_fixed16(method1, method2);
-               else
+               } else if (ddb_allocation >=
+                        fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+                       if (IS_GEN9(dev_priv) &&
+                           !IS_GEMINILAKE(dev_priv))
+                               selected_result = min_fixed16(method1, method2);
+                       else
+                               selected_result = method2;
+               } else if (latency >= wp->linetime_us) {
+                       if (IS_GEN9(dev_priv) &&
+                           !IS_GEMINILAKE(dev_priv))
+                               selected_result = min_fixed16(method1, method2);
+                       else
+                               selected_result = method2;
+               } else {
                        selected_result = method1;
+               }
        }
 
        res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4756,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                }
        }
 
-       /*
-        * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
-        * disable wm level 1-7 on NV12 planes
-        */
-       if (wp->is_planar && level >= 1 &&
-           (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
-            IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
-               result->plane_en = false;
-               return 0;
-       }
-
        /* The number of lines are ignored for the level 0 watermark. */
        result->plane_res_b = res_blocks;
        result->plane_res_l = res_lines;
@@ -4778,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 static int
 skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
                      struct skl_ddb_allocation *ddb,
-                     struct intel_crtc_state *cstate,
+                     const struct intel_crtc_state *cstate,
                      const struct intel_plane_state *intel_pstate,
+                     uint16_t ddb_blocks,
                      const struct skl_wm_params *wm_params,
                      struct skl_plane_wm *wm,
-                     int plane_id)
+                     struct skl_wm_level *levels)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-       struct drm_plane *plane = intel_pstate->base.plane;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       uint16_t ddb_blocks;
-       enum pipe pipe = intel_crtc->pipe;
        int level, max_level = ilk_wm_max_level(dev_priv);
-       enum plane_id intel_plane_id = intel_plane->id;
+       struct skl_wm_level *result_prev = &levels[0];
        int ret;
 
        if (WARN_ON(!intel_pstate->base.fb))
                return -EINVAL;
 
-       ddb_blocks = plane_id ?
-                    skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
-                    skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
        for (level = 0; level <= max_level; level++) {
-               struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
-                                                         &wm->wm[level];
-               struct skl_wm_level *result_prev;
-
-               if (level)
-                       result_prev = plane_id ? &wm->uv_wm[level - 1] :
-                                                 &wm->wm[level - 1];
-               else
-                       result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
+               struct skl_wm_level *result = &levels[level];
 
                ret = skl_compute_plane_wm(dev_priv,
                                           cstate,
@@ -4821,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
                                           result);
                if (ret)
                        return ret;
+
+               result_prev = result;
        }
 
        if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
@@ -4830,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 }
 
 static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4852,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
        return linetime_wm;
 }
 
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
                                      struct skl_wm_params *wp,
                                      struct skl_wm_level *wm_l0,
                                      uint16_t ddb_allocation,
@@ -4862,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
        const struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t trans_min, trans_y_tile_min;
        const uint16_t trans_amount = 10; /* This is configurable amount */
-       uint16_t trans_offset_b, res_blocks;
+       uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
        if (!cstate->base.active)
                goto exit;
@@ -4875,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
        if (!dev_priv->ipc_enabled)
                goto exit;
 
-       trans_min = 0;
-       if (INTEL_GEN(dev_priv) >= 10)
+       trans_min = 14;
+       if (INTEL_GEN(dev_priv) >= 11)
                trans_min = 4;
 
        trans_offset_b = trans_min + trans_amount;
 
+       /*
+        * The spec asks for Selected Result Blocks for wm0 (the real value),
+        * not Result Blocks (the integer value). Pay attention to the capital
+        * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
+        * since Result Blocks is ceil(Selected Result Blocks) + 1, and since
+        * we will later have to take the ceiling of the sum anyway in the
+        * transition watermark calculation, we can just pretend Selected
+        * Result Blocks is Result Blocks minus 1, which works for the
+        * current platforms.
+        */
+       wm0_sel_res_b = wm_l0->plane_res_b - 1;
+
        if (wp->y_tiled) {
                trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
                                                        wp->y_tile_minimum);
-               res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+               res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
                                trans_offset_b;
        } else {
-               res_blocks = wm_l0->plane_res_b + trans_offset_b;
+               res_blocks = wm0_sel_res_b + trans_offset_b;
 
                /* WA BUG:1938466 add one block for non y-tile planes */
                if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4907,16 +5001,101 @@ exit:
        trans_wm->plane_en = false;
 }
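
A worked instance of the non-y-tiled branch of the transition watermark arithmetic above (a sketch, not part of the patch): with Result Blocks = 50, trans_min = 14 and trans_amount = 10, Selected Result Blocks = 49, trans_offset_b = 24, and res_blocks = 73.

/* Sketch only: the non-y-tiled transition WM arithmetic. */
#include <stdint.h>

static uint16_t trans_res_blocks(uint16_t wm0_res_b,
                                 uint16_t trans_min, uint16_t trans_amount)
{
        /* Result Blocks -> Selected Result Blocks, per the comment above */
        uint16_t wm0_sel_res_b = wm0_res_b - 1;

        return wm0_sel_res_b + (trans_min + trans_amount);
}
/* e.g. trans_res_blocks(50, 14, 10) == 73 */
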
 
+static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+                                      struct skl_pipe_wm *pipe_wm,
+                                      enum plane_id plane_id,
+                                      const struct intel_crtc_state *cstate,
+                                      const struct intel_plane_state *pstate,
+                                      int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
+       struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+       enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+       struct skl_wm_params wm_params;
+       uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+       int ret;
+
+       ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+                                         &wm_params, color_plane);
+       if (ret)
+               return ret;
+
+       ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+                                   ddb_blocks, &wm_params, wm, wm->wm);
+
+       if (ret)
+               return ret;
+
+       skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
+                                 ddb_blocks, &wm->trans_wm);
+
+       return 0;
+}
+
+static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
+
+       return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+}
+
+static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
+       struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+       struct skl_wm_params wm_params;
+       enum pipe pipe = plane->pipe;
+       uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+       int ret;
+
+       ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+       if (ret)
+               return ret;
+
+       /* uv plane watermarks must also be validated for NV12/Planar */
+       ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+
+       ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+       if (ret)
+               return ret;
+
+       return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+                                    ddb_blocks, &wm_params, wm, wm->uv_wm);
+}
+
+static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       int ret;
+       enum plane_id y_plane_id = pstate->linked_plane->id;
+       enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
+
+       ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
+                                         cstate, pstate, 0);
+       if (ret)
+               return ret;
+
+       return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
+                                          cstate, pstate, 1);
+}
+
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
                             struct skl_ddb_allocation *ddb,
                             struct skl_pipe_wm *pipe_wm)
 {
-       struct drm_device *dev = cstate->base.crtc->dev;
        struct drm_crtc_state *crtc_state = &cstate->base;
-       const struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane *plane;
        const struct drm_plane_state *pstate;
-       struct skl_plane_wm *wm;
        int ret;
 
        /*
@@ -4928,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
                const struct intel_plane_state *intel_pstate =
                                                to_intel_plane_state(pstate);
-               enum plane_id plane_id = to_intel_plane(plane)->id;
-               struct skl_wm_params wm_params;
-               enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
-               uint16_t ddb_blocks;
 
-               wm = &pipe_wm->planes[plane_id];
-               ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+               /* Watermarks calculated in master */
+               if (intel_pstate->slave)
+                       continue;
 
-               ret = skl_compute_plane_wm_params(dev_priv, cstate,
-                                                 intel_pstate, &wm_params, 0);
-               if (ret)
-                       return ret;
+               if (intel_pstate->linked_plane)
+                       ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+               else if (intel_pstate->base.fb &&
+                        intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
+                       ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+               else
+                       ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
 
-               ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-                                           intel_pstate, &wm_params, wm, 0);
                if (ret)
                        return ret;
-
-               skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-                                         ddb_blocks, &wm->trans_wm);
-
-               /* uv plane watermarks must also be validated for NV12/Planar */
-               if (wm_params.is_planar) {
-                       memset(&wm_params, 0, sizeof(struct skl_wm_params));
-                       wm->is_planar = true;
-
-                       ret = skl_compute_plane_wm_params(dev_priv, cstate,
-                                                         intel_pstate,
-                                                         &wm_params, 1);
-                       if (ret)
-                               return ret;
-
-                       ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-                                                   intel_pstate, &wm_params,
-                                                   wm, 1);
-                       if (ret)
-                               return ret;
-               }
        }
 
        pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -5016,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
        skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
                           &wm->trans_wm);
 
-       skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-                           &ddb->plane[pipe][plane_id]);
-       /* FIXME: add proper NV12 support for ICL. */
-       if (INTEL_GEN(dev_priv) >= 11)
-               return skl_ddb_entry_write(dev_priv,
-                                          PLANE_BUF_CFG(pipe, plane_id),
-                                          &ddb->plane[pipe][plane_id]);
-       if (wm->is_planar) {
+       if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
                skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
                                    &ddb->uv_plane[pipe][plane_id]);
                skl_ddb_entry_write(dev_priv,
@@ -5032,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
        } else {
                skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
                                    &ddb->plane[pipe][plane_id]);
-               I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+               if (INTEL_GEN(dev_priv) < 11)
+                       I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
        }
 }
 
@@ -5076,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
        return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-                                const struct skl_ddb_entry **entries,
-                                const struct skl_ddb_entry *ddb,
-                                int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+                                const struct skl_ddb_entry entries[],
+                                int num_entries, int ignore_idx)
 {
-       enum pipe pipe;
+       int i;
 
-       for_each_pipe(dev_priv, pipe) {
-               if (pipe != ignore && entries[pipe] &&
-                   skl_ddb_entries_overlap(ddb, entries[pipe]))
+       for (i = 0; i < num_entries; i++) {
+               if (i != ignore_idx &&
+                   skl_ddb_entries_overlap(ddb, &entries[i]))
                        return true;
        }
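
A minimal, self-contained sketch of the overlap test this helper family
implements, assuming half-open [start, end) DDB entries (all names below are
illustrative, not the driver's):

    #include <stdbool.h>

    struct range { int start, end; };

    /* Two half-open ranges overlap iff each starts before the other ends. */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
            return a->start < b->end && b->start < a->end;
    }

    /* Skipping ignore_idx keeps a plane's own allocation from being
     * reported as a conflict with itself. */
    static bool any_overlap(const struct range *r, const struct range *v,
                            int n, int ignore_idx)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (i != ignore_idx && ranges_overlap(r, &v[i]))
                            return true;
            return false;
    }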
 
@@ -5137,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
        struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-       struct drm_plane_state *plane_state;
        struct drm_plane *plane;
        enum pipe pipe = intel_crtc->pipe;
 
        drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
+               struct drm_plane_state *plane_state;
+               struct intel_plane *linked;
                enum plane_id plane_id = to_intel_plane(plane)->id;
 
                if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
@@ -5153,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
                plane_state = drm_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state))
                        return PTR_ERR(plane_state);
+
+               /* Make sure linked plane is updated too */
+               linked = to_intel_plane_state(plane_state)->linked_plane;
+               if (!linked)
+                       continue;
+
+               plane_state = drm_atomic_get_plane_state(state, &linked->base);
+               if (IS_ERR(plane_state))
+                       return PTR_ERR(plane_state);
        }
 
        return 0;
@@ -5211,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
                        if (skl_ddb_entry_equal(old, new))
                                continue;
 
-                       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-                                        intel_plane->base.base.id,
-                                        intel_plane->base.name,
-                                        old->start, old->end,
-                                        new->start, new->end);
+                       DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+                                     intel_plane->base.base.id,
+                                     intel_plane->base.name,
+                                     old->start, old->end,
+                                     new->start, new->end);
                }
        }
 }
@@ -6117,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
        u32 val;
 
-       /* Display WA #0477 WaDisableIPC: skl */
-       if (IS_SKYLAKE(dev_priv))
-               dev_priv->ipc_enabled = false;
-
-       /* Display WA #1141: SKL:all KBL:all CFL */
-       if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
-           !dev_priv->dram_info.symmetric_memory)
-               dev_priv->ipc_enabled = false;
+       if (!HAS_IPC(dev_priv))
+               return;
 
        val = I915_READ(DISP_ARB_CTL2);
 
@@ -6138,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 
 void intel_init_ipc(struct drm_i915_private *dev_priv)
 {
-       dev_priv->ipc_enabled = false;
        if (!HAS_IPC(dev_priv))
                return;
 
-       dev_priv->ipc_enabled = true;
+       /* Display WA #1141: SKL:all KBL:all CFL */
+       if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+               dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+       else
+               dev_priv->ipc_enabled = true;
+
        intel_enable_ipc(dev_priv);
 }
 
@@ -8736,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
        /* This is not a Wa. Enable to reduce Sampler power */
        I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
                   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+       /* WaEnable32PlaneMode:icl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+                  _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
 }
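
The write above uses i915's masked-bit register convention; a hedged sketch of
the idea (the exact macro expansion is an assumption here, not quoted from the
headers): the upper 16 bits of such registers act as a per-bit write-enable
mask, so only explicitly unmasked bits take effect.

    /* Assumed expansion, illustrative only: to set bit N in a masked
     * register, write the mask bit together with the value bit; to clear
     * it, write only the mask bit. */
    #define MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a)   ((a) << 16)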
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9313,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-       intel_fbc_init(dev_priv);
-
        /* For cxsr */
        if (IS_PINEVIEW(dev_priv))
                i915_pineview_get_mem_freq(dev_priv);
index b6838b525502ea68f8d472dbd703d51bfa1d6561..54fa17a5596a4850cfde042d76a15bffa5b54258 100644 (file)
@@ -71,6 +71,10 @@ static bool psr_global_enabled(u32 debug)
 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
                               const struct intel_crtc_state *crtc_state)
 {
+       /* Disable PSR2 by default for all platforms */
+       if (i915_modparams.enable_psr == -1)
+               return false;
+
        switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
        case I915_PSR_DEBUG_FORCE_PSR1:
                return false;
@@ -79,25 +83,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
        }
 }
 
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+       switch (cpu_transcoder) {
+       case TRANSCODER_A:
+               return EDP_PSR_TRANSCODER_A_SHIFT;
+       case TRANSCODER_B:
+               return EDP_PSR_TRANSCODER_B_SHIFT;
+       case TRANSCODER_C:
+               return EDP_PSR_TRANSCODER_C_SHIFT;
+       default:
+               MISSING_CASE(cpu_transcoder);
+               /* fallthrough */
+       case TRANSCODER_EDP:
+               return EDP_PSR_TRANSCODER_EDP_SHIFT;
+       }
+}
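
For orientation, a sketch of the per-transcoder layout this shift indexes
into; the field width and bit offsets are assumptions for illustration, not
taken from the register definitions:

    /* Assumed layout: each transcoder owns a small contiguous event field
     * in the PSR IIR/IMR, so a single shift positions all three bits. */
    #define PSR_PRE_ENTRY(shift)    (1u << ((shift) + 0))
    #define PSR_POST_EXIT(shift)    (1u << ((shift) + 1))
    #define PSR_ERROR(shift)        (1u << ((shift) + 2))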
+
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
 {
        u32 debug_mask, mask;
+       enum transcoder cpu_transcoder;
+       u32 transcoders = BIT(TRANSCODER_EDP);
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               transcoders |= BIT(TRANSCODER_A) |
+                              BIT(TRANSCODER_B) |
+                              BIT(TRANSCODER_C);
 
-       mask = EDP_PSR_ERROR(TRANSCODER_EDP);
-       debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
-                    EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
-
-       if (INTEL_GEN(dev_priv) >= 8) {
-               mask |= EDP_PSR_ERROR(TRANSCODER_A) |
-                       EDP_PSR_ERROR(TRANSCODER_B) |
-                       EDP_PSR_ERROR(TRANSCODER_C);
-
-               debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
-                             EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
-                             EDP_PSR_POST_EXIT(TRANSCODER_B) |
-                             EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
-                             EDP_PSR_POST_EXIT(TRANSCODER_C) |
-                             EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+       debug_mask = 0;
+       mask = 0;
+       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+               int shift = edp_psr_shift(cpu_transcoder);
+
+               mask |= EDP_PSR_ERROR(shift);
+               debug_mask |= EDP_PSR_POST_EXIT(shift) |
+                             EDP_PSR_PRE_ENTRY(shift);
        }
 
        if (debug & I915_PSR_DEBUG_IRQ)
@@ -155,18 +176,20 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
                               BIT(TRANSCODER_C);
 
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+               int shift = edp_psr_shift(cpu_transcoder);
+
                /* FIXME: Exit PSR and link train manually when this happens. */
-               if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+               if (psr_iir & EDP_PSR_ERROR(shift))
                        DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
                                      transcoder_name(cpu_transcoder));
 
-               if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+               if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
                        dev_priv->psr.last_entry_attempt = time_ns;
                        DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
                                      transcoder_name(cpu_transcoder));
                }
 
-               if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+               if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
                        dev_priv->psr.last_exit = time_ns;
                        DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
                                      transcoder_name(cpu_transcoder));
@@ -294,7 +317,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
                psr_vsc.sdp_header.HB3 = 0x8;
        }
 
-       intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+       intel_dig_port->write_infoframe(&intel_dig_port->base,
+                                       crtc_state,
                                        DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
@@ -553,11 +577,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
        dev_priv->psr.active = true;
 }
 
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+                                        enum transcoder cpu_transcoder)
+{
+       static const i915_reg_t regs[] = {
+               [TRANSCODER_A] = CHICKEN_TRANS_A,
+               [TRANSCODER_B] = CHICKEN_TRANS_B,
+               [TRANSCODER_C] = CHICKEN_TRANS_C,
+               [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+       };
+
+       WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+       if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+                   !regs[cpu_transcoder].reg))
+               cpu_transcoder = TRANSCODER_A;
+
+       return regs[cpu_transcoder];
+}
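
The same guarded-table pattern in isolation, with hypothetical register
offsets (a sketch, not the driver's definitions): unpopulated
designated-initializer slots read back as zero, which the guard treats as
invalid and clamps to a known-good default after a WARN.

    static u32 chicken_reg(unsigned int idx)
    {
            static const u32 regs[] = {
                    [0] = 0x420c0,  /* offsets are illustrative only */
                    [1] = 0x420c4,
                    [3] = 0x420cc,  /* slot [2] left unpopulated */
            };

            if (WARN_ON(idx >= ARRAY_SIZE(regs) || !regs[idx]))
                    idx = 0;        /* clamp to a safe default */

            return regs[idx];
    }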
+
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
                                    const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       u32 mask;
 
        /* Only HSW and BDW have PSR AUX registers that need to be set up.
         * SKL+ use hardcoded values for PSR AUX transactions
@@ -566,37 +610,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
                hsw_psr_setup_aux(intel_dp);
 
        if (dev_priv->psr.psr2_enabled) {
-               u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+               i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+                                                       cpu_transcoder);
+               u32 chicken = I915_READ(reg);
 
-               if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+               if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
                        chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
                                   | PSR2_ADD_VERTICAL_LINE_COUNT);
 
                else
                        chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
-               I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
-               I915_WRITE(EDP_PSR_DEBUG,
-                          EDP_PSR_DEBUG_MASK_MEMUP |
-                          EDP_PSR_DEBUG_MASK_HPD |
-                          EDP_PSR_DEBUG_MASK_LPSP |
-                          EDP_PSR_DEBUG_MASK_MAX_SLEEP |
-                          EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
-       } else {
-               /*
-                * Per Spec: Avoid continuous PSR exit by masking MEMUP
-                * and HPD. also mask LPSP to avoid dependency on other
-                * drivers that might block runtime_pm besides
-                * preventing  other hw tracking issues now we can rely
-                * on frontbuffer tracking.
-                */
-               I915_WRITE(EDP_PSR_DEBUG,
-                          EDP_PSR_DEBUG_MASK_MEMUP |
-                          EDP_PSR_DEBUG_MASK_HPD |
-                          EDP_PSR_DEBUG_MASK_LPSP |
-                          EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
-                          EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+               I915_WRITE(reg, chicken);
        }
+
+       /*
+        * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+        * Also mask LPSP to avoid dependency on other drivers that might
+        * block runtime_pm, besides preventing other HW tracking issues,
+        * now that we can rely on frontbuffer tracking.
+        */
+       mask = EDP_PSR_DEBUG_MASK_MEMUP |
+              EDP_PSR_DEBUG_MASK_HPD |
+              EDP_PSR_DEBUG_MASK_LPSP |
+              EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+       if (INTEL_GEN(dev_priv) < 11)
+               mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+       I915_WRITE(EDP_PSR_DEBUG, mask);
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -656,49 +697,34 @@ unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-       if (dev_priv->psr.active) {
-               i915_reg_t psr_status;
-               u32 psr_status_mask;
-
-               if (dev_priv->psr.psr2_enabled) {
-                       psr_status = EDP_PSR2_STATUS;
-                       psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
-                       I915_WRITE(EDP_PSR2_CTL,
-                                  I915_READ(EDP_PSR2_CTL) &
-                                  ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
-               } else {
-                       psr_status = EDP_PSR_STATUS;
-                       psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
-                       I915_WRITE(EDP_PSR_CTL,
-                                  I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
-               }
+       u32 val;
 
-               /* Wait till PSR is idle */
-               if (intel_wait_for_register(dev_priv,
-                                           psr_status, psr_status_mask, 0,
-                                           2000))
-                       DRM_ERROR("Timed out waiting for PSR Idle State\n");
+       if (!dev_priv->psr.active) {
+               if (INTEL_GEN(dev_priv) >= 9)
+                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+               return;
+       }
 
-               dev_priv->psr.active = false;
+       if (dev_priv->psr.psr2_enabled) {
+               val = I915_READ(EDP_PSR2_CTL);
+               WARN_ON(!(val & EDP_PSR2_ENABLE));
+               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
        } else {
-               if (dev_priv->psr.psr2_enabled)
-                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-               else
-                       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+               val = I915_READ(EDP_PSR_CTL);
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
        }
+       dev_priv->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       i915_reg_t psr_status;
+       u32 psr_status_mask;
 
        lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -707,7 +733,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Disabling PSR%s\n",
                      dev_priv->psr.psr2_enabled ? "2" : "1");
-       intel_psr_disable_source(intel_dp);
+
+       intel_psr_exit(dev_priv);
+
+       if (dev_priv->psr.psr2_enabled) {
+               psr_status = EDP_PSR2_STATUS;
+               psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+       } else {
+               psr_status = EDP_PSR_STATUS;
+               psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+       }
+
+       /* Wait till PSR is idle */
+       if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+                                   2000))
+               DRM_ERROR("Timed out waiting PSR idle state\n");
 
        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -925,25 +965,6 @@ unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       if (!dev_priv->psr.active)
-               return;
-
-       if (dev_priv->psr.psr2_enabled) {
-               val = I915_READ(EDP_PSR2_CTL);
-               WARN_ON(!(val & EDP_PSR2_ENABLE));
-               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-       } else {
-               val = I915_READ(EDP_PSR_CTL);
-               WARN_ON(!(val & EDP_PSR_ENABLE));
-               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-       }
-       dev_priv->psr.active = false;
-}
-
 /**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
@@ -1026,20 +1047,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits) {
-               if (dev_priv->psr.psr2_enabled) {
-                       intel_psr_exit(dev_priv);
-               } else {
-                       /*
-                        * Display WA #0884: all
-                        * This documented WA for bxt can be safely applied
-                        * broadly so we can force HW tracking to exit PSR
-                        * instead of disabling and re-enabling.
-                        * Workaround tells us to write 0 to CUR_SURFLIVE_A,
-                        * but it makes more sense write to the current active
-                        * pipe.
-                        */
-                       I915_WRITE(CURSURFLIVE(pipe), 0);
-               }
+               /*
+                * Display WA #0884: all
+                * This documented WA for bxt can be safely applied
+                * broadly, so we can force HW tracking to exit PSR
+                * instead of disabling and re-enabling it.
+                * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+                * but it makes more sense to write to the currently
+                * active pipe.
+                */
+               I915_WRITE(CURSURFLIVE(pipe), 0);
        }
 
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1065,12 +1082,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
        if (!dev_priv->psr.sink_support)
                return;
 
-       if (i915_modparams.enable_psr == -1) {
-               i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
-
-               /* Per platform default: all disabled. */
-               i915_modparams.enable_psr = 0;
-       }
+       if (i915_modparams.enable_psr == -1)
+               if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+                       i915_modparams.enable_psr = 0;
 
        /* Set link_standby x link_off defaults */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1130,8 +1144,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
                intel_psr_disable_locked(intel_dp);
        /* clear status register */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
-       /* TODO: handle PSR2 errors */
 exit:
        mutex_unlock(&psr->lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644 (file)
index 0000000..ec2b0fc
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+       DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+       DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBTs incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+       DRM_INFO("applying backlight present quirk\n");
+}
+
+/*
+ * Toshiba Satellite P50-C-18C requires the T12 delay to be a minimum of
+ * 800 ms, which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+       DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+       int device;
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+       void (*hook)(struct drm_i915_private *i915);
+       const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+       DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+       return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+       {
+               .dmi_id_list = &(const struct dmi_system_id[]) {
+                       {
+                               .callback = intel_dmi_reverse_brightness,
+                               .ident = "NCR Corporation",
+                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
+                               },
+                       },
+                       { }  /* terminating entry */
+               },
+               .hook = quirk_invert_brightness,
+       },
+};
+
+static struct intel_quirk intel_quirks[] = {
+       /* Lenovo U160 cannot use SSC on LVDS */
+       { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+       /* Sony Vaio Y cannot use SSC on LVDS */
+       { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+       /* Acer Aspire 5734Z must invert backlight brightness */
+       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+       /* Acer/eMachines G725 */
+       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+       /* Acer/eMachines e725 */
+       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+       /* Acer/Packard Bell NCL20 */
+       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+       /* Acer Aspire 4736Z */
+       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+       /* Acer Aspire 5336 */
+       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Acer C720 Chromebook (Core i3 4005U) */
+       { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Apple Macbook 2,1 (Core 2 T7400) */
+       { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+       /* Apple Macbook 4,1 */
+       { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+       /* Toshiba CB35 Chromebook (Celeron 2955U) */
+       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+       /* HP Chromebook 14 (Celeron 2955U) */
+       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+       /* Dell Chromebook 11 */
+       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+       /* Dell Chromebook 11 (2015 version) */
+       { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+       /* Toshiba Satellite P50-C-18C */
+       { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+       struct pci_dev *d = i915->drm.pdev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+               struct intel_quirk *q = &intel_quirks[i];
+
+               if (d->device == q->device &&
+                   (d->subsystem_vendor == q->subsystem_vendor ||
+                    q->subsystem_vendor == PCI_ANY_ID) &&
+                   (d->subsystem_device == q->subsystem_device ||
+                    q->subsystem_device == PCI_ANY_ID))
+                       q->hook(i915);
+       }
+       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+               if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+                       intel_dmi_quirks[i].hook(i915);
+       }
+}
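
A self-contained illustration of the match rule the table walk applies (IDs
are made up and ANY_ID stands in for PCI_ANY_ID): the device ID must match
exactly, while either subsystem field may be wildcarded.

    #include <stdio.h>

    #define ANY_ID 0xffff  /* stands in for PCI_ANY_ID */

    struct quirk { int device, sub_vendor, sub_device; };

    static int quirk_matches(const struct quirk *q,
                             int dev, int sub_ven, int sub_dev)
    {
            return dev == q->device &&
                   (sub_ven == q->sub_vendor || q->sub_vendor == ANY_ID) &&
                   (sub_dev == q->sub_device || q->sub_device == ANY_ID);
    }

    int main(void)
    {
            const struct quirk q = { 0x0046, 0x17aa, 0x3920 };

            /* exact subsystem match -> 1, different subvendor -> 0 */
            printf("%d %d\n",
                   quirk_matches(&q, 0x0046, 0x17aa, 0x3920),
                   quirk_matches(&q, 0x0046, 0x104d, 0x3920));
            return 0;
    }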
index d0ef50bf930ad747abe7b4510521f8ad79923ba5..87eebc13c0d86bb2649c58654b618240d9a0a76e 100644 (file)
@@ -91,6 +91,7 @@ static int
 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
+       int i;
 
        /*
         * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
                        cmd |= MI_INVALIDATE_ISP;
        }
 
-       cs = intel_ring_begin(rq, 2);
+       i = 2;
+       if (mode & EMIT_INVALIDATE)
+               i += 20;
+
+       cs = intel_ring_begin(rq, i);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
-       *cs++ = MI_NOOP;
+
+       /*
+        * A random delay to let the CS invalidate take effect? Without this
+        * delay, the GPU relocation path fails as the CS does not see
+        * the updated contents. Just as important, if we apply the flushes
+        * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+        * write and before the invalidate on the next batch), the relocations
+        * still fail. This implies that it is a delay following the
+        * invalidate that is required to reset the caches, as opposed to a
+        * delay to ensure the memory is written.
+        */
+       if (mode & EMIT_INVALIDATE) {
+               *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = 0;
+               *cs++ = 0;
+
+               for (i = 0; i < 12; i++)
+                       *cs++ = MI_FLUSH;
+
+               *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = 0;
+               *cs++ = 0;
+       }
+
+       *cs++ = cmd;
+
        intel_ring_advance(rq, cs);
 
        return 0;
@@ -574,7 +608,9 @@ static void skip_request(struct i915_request *rq)
 
 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
 {
-       GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+       GEM_TRACE("%s request global=%d, current=%d\n",
+                 engine->name, rq ? rq->global_seqno : 0,
+                 intel_engine_get_seqno(engine));
 
        /*
         * Try to restore the logical GPU state to match the continuation
@@ -1021,8 +1057,7 @@ i915_emit_bb_start(struct i915_request *rq,
 int intel_ring_pin(struct intel_ring *ring)
 {
        struct i915_vma *vma = ring->vma;
-       enum i915_map_type map =
-               HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+       enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
        unsigned int flags;
        void *addr;
        int ret;
index 2dfa585712c28ac4196830a30dafc4448b3de604..8a2270b209b0c5da57864beec724f0bfd9bbc348 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
@@ -93,11 +93,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 #define I915_MAX_SUBSLICES 8
 
 #define instdone_slice_mask(dev_priv__) \
-       (INTEL_GEN(dev_priv__) == 7 ? \
+       (IS_GEN7(dev_priv__) ? \
         1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
 
 #define instdone_subslice_mask(dev_priv__) \
-       (INTEL_GEN(dev_priv__) == 7 ? \
+       (IS_GEN7(dev_priv__) ? \
         1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
 
 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +190,22 @@ enum intel_engine_id {
 };
 
 struct i915_priolist {
+       struct list_head requests[I915_PRIORITY_COUNT];
        struct rb_node node;
-       struct list_head requests;
+       unsigned long used;
        int priority;
 };
 
+#define priolist_for_each_request(it, plist, idx) \
+       for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+               list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+       for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+               list_for_each_entry_safe(it, n, \
+                                        &(plist)->requests[idx - 1], \
+                                        sched.link)
+
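
A sketch of the ffs()-driven walk the consume variant performs (helper names
are hypothetical): each bit of 'used' marks a populated priority bucket,
ffs() yields the lowest set bit 1-based, and the bit is cleared once its
bucket has been drained, so only non-empty lists are ever visited.

    #include <strings.h>    /* userspace ffs(); the kernel has its own */

    static void drain_bucket(int idx)
    {
            /* hypothetical consumer of one priority list */
    }

    static void consume_all(unsigned int used)
    {
            int idx;

            while ((idx = ffs(used))) {
                    drain_bucket(idx - 1);
                    used &= ~(1u << (idx - 1));
            }
    }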
 struct st_preempt_hang {
        struct completion completion;
        bool inject_hang;
@@ -487,11 +498,10 @@ struct intel_engine_cs {
         */
        void            (*submit_request)(struct i915_request *rq);
 
-       /* Call when the priority on a request has changed and it and its
+       /*
+        * Call when the priority on a request has changed and it and its
         * dependencies may need rescheduling. Note the request itself may
         * not be ready to run!
-        *
-        * Called under the struct_mutex.
         */
        void            (*schedule)(struct i915_request *request,
                                    const struct i915_sched_attr *attr);
index 0fdabce647ab64be1751da09ed705de3889ad969..1c2de9b69a199c0534b1358a8e647bbb3a41dda0 100644 (file)
@@ -208,7 +208,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 
        is_enabled = true;
 
-       for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
                if (power_well->desc->always_on)
                        continue;
 
@@ -436,6 +436,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
        I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
 
        hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+       /* Display WA #1178: icl */
+       if (IS_ICELAKE(dev_priv) &&
+           pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+           !intel_bios_is_port_edp(dev_priv, port)) {
+               val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+               val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+               I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+       }
 }
 
 static void
@@ -456,6 +465,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
        hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
+#define ICL_AUX_PW_TO_CH(pw_idx)       \
+       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+       u32 val;
+
+       val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+       val &= ~DP_AUX_CH_CTL_TBT_IO;
+       if (power_well->desc->hsw.is_tc_tbt)
+               val |= DP_AUX_CH_CTL_TBT_IO;
+       I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+       hsw_power_well_enable(dev_priv, power_well);
+}
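
A worked illustration of the index translation in ICL_AUX_PW_TO_CH, assuming
both enums are consecutive (the numeric values here are hypothetical):

    /* If ICL_PW_CTL_IDX_AUX_A == 8 and AUX_CH_A == 0, then for the AUX B
     * power well: ICL_AUX_PW_TO_CH(9) == 9 - 8 + 0 == 1 == AUX_CH_B. */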
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -465,11 +493,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       enum i915_power_well_id id = power_well->desc->id;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
                   HSW_PWR_WELL_CTL_STATE(pw_idx);
+       u32 val;
 
-       return (I915_READ(regs->driver) & mask) == mask;
+       val = I915_READ(regs->driver);
+
+       /*
+        * On GEN9 big core, due to a DMC bug, the driver's request bits for
+        * PW1 and the MISC_IO PW will not be restored, so check instead for the
+        * BIOS's own request bits, which are forced-on for these power wells
+        * when exiting DC5/6.
+        */
+       if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+           (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+               val |= I915_READ(regs->bios);
+
+       return (val & mask) == mask;
 }
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
@@ -551,7 +593,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
        u32 mask;
 
        mask = DC_STATE_EN_UPTO_DC5;
-       if (IS_GEN9_LP(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11)
+               mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+       else if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_EN_DC9;
        else
                mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +668,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
        assert_can_enable_dc9(dev_priv);
 
        DRM_DEBUG_KMS("Enabling DC9\n");
-
-       intel_power_sequencer_reset(dev_priv);
+       /*
+        * Power sequencer reset is not needed on
+        * platforms with South Display Engine on PCH,
+        * because PPS registers are always on.
+        */
+       if (!HAS_PCH_SPLIT(dev_priv))
+               intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -707,7 +756,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
        assert_csr_loaded(dev_priv);
 }
 
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
        assert_can_enable_dc6(dev_priv);
 
@@ -808,6 +857,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 
        if (IS_GEN9_LP(dev_priv))
                bxt_verify_ddi_phy_power_wells(dev_priv);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               /*
+                * DMC retains HW context only for port A, the other combo
+                * PHY's HW context for port B is lost after DC transitions,
+                * so we need to restore it manually.
+                */
+               icl_combo_phys_init(dev_priv);
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1665,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
             intel_display_power_domain_str(domain));
        power_domains->domain_use_count[domain]--;
 
-       for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_put(dev_priv, power_well);
 
        mutex_unlock(&power_domains->lock);
@@ -2041,7 +2098,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2058,7 +2115,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
 static const struct i915_power_well_desc i830_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2102,7 +2159,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
 static const struct i915_power_well_desc hsw_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2123,7 +2180,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
 static const struct i915_power_well_desc bdw_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2166,7 +2223,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 static const struct i915_power_well_desc vlv_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2242,7 +2299,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
 static const struct i915_power_well_desc chv_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2293,7 +2350,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 static const struct i915_power_well_desc skl_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2301,6 +2358,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
@@ -2313,6 +2371,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
        {
                .name = "MISC IO power well",
                /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2444,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 static const struct i915_power_well_desc bxt_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
@@ -2443,7 +2504,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
 static const struct i915_power_well_desc glk_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2451,6 +2512,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
@@ -2571,7 +2633,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
 static const struct i915_power_well_desc cnl_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2579,6 +2641,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
@@ -2716,6 +2779,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
        .is_enabled = hsw_power_well_enabled,
 };
 
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = icl_tc_phy_aux_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
 static const struct i915_power_well_regs icl_aux_power_well_regs = {
        .bios   = ICL_PWR_WELL_CTL_AUX1,
        .driver = ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2801,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
 static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "always-on",
-               .always_on = 1,
+               .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
@@ -2739,6 +2809,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
+               .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
@@ -2748,6 +2819,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
                        .hsw.has_fuses = true,
                },
        },
+       {
+               .name = "DC off",
+               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
        {
                .name = "power well 2",
                .domains = ICL_PW_2_POWER_DOMAINS,
@@ -2759,12 +2836,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
                        .hsw.has_fuses = true,
                },
        },
-       {
-               .name = "DC off",
-               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
        {
                .name = "power well 3",
                .domains = ICL_PW_3_POWER_DOMAINS,
@@ -2861,81 +2932,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX C",
                .domains = ICL_AUX_C_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+                       .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX D",
                .domains = ICL_AUX_D_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+                       .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX E",
                .domains = ICL_AUX_E_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+                       .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX F",
                .domains = ICL_AUX_F_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+                       .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX TBT1",
                .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+                       .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT2",
                .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+                       .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT3",
                .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+                       .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT4",
                .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+                       .hsw.is_tc_tbt = true,
                },
        },
        {
@@ -2969,17 +3048,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
        int requested_dc;
        int max_dc;
 
-       if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+       if (INTEL_GEN(dev_priv) >= 11) {
                max_dc = 2;
-               mask = 0;
-       } else if (IS_GEN9_LP(dev_priv)) {
-               max_dc = 1;
                /*
                 * DC9 has a separate HW flow from the rest of the DC states,
                 * not depending on the DMC firmware. It's needed by system
                 * suspend/resume, so allow it unconditionally.
                 */
                mask = DC_STATE_EN_DC9;
+       } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+               max_dc = 2;
+               mask = 0;
+       } else if (IS_GEN9_LP(dev_priv)) {
+               max_dc = 1;
+               mask = DC_STATE_EN_DC9;
        } else {
                max_dc = 0;
                mask = 0;
@@ -3075,12 +3157,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         */
        if (IS_ICELAKE(dev_priv)) {
                err = set_power_wells(power_domains, icl_power_wells);
-       } else if (IS_HASWELL(dev_priv)) {
-               err = set_power_wells(power_domains, hsw_power_wells);
-       } else if (IS_BROADWELL(dev_priv)) {
-               err = set_power_wells(power_domains, bdw_power_wells);
-       } else if (IS_GEN9_BC(dev_priv)) {
-               err = set_power_wells(power_domains, skl_power_wells);
        } else if (IS_CANNONLAKE(dev_priv)) {
                err = set_power_wells(power_domains, cnl_power_wells);
 
@@ -3092,13 +3168,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
                 */
                if (!IS_CNL_WITH_PORT_F(dev_priv))
                        power_domains->power_well_count -= 2;
-
-       } else if (IS_BROXTON(dev_priv)) {
-               err = set_power_wells(power_domains, bxt_power_wells);
        } else if (IS_GEMINILAKE(dev_priv)) {
                err = set_power_wells(power_domains, glk_power_wells);
+       } else if (IS_BROXTON(dev_priv)) {
+               err = set_power_wells(power_domains, bxt_power_wells);
+       } else if (IS_GEN9_BC(dev_priv)) {
+               err = set_power_wells(power_domains, skl_power_wells);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                err = set_power_wells(power_domains, chv_power_wells);
+       } else if (IS_BROADWELL(dev_priv)) {
+               err = set_power_wells(power_domains, bdw_power_wells);
+       } else if (IS_HASWELL(dev_priv)) {
+               err = set_power_wells(power_domains, hsw_power_wells);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                err = set_power_wells(power_domains, vlv_power_wells);
        } else if (IS_I830(dev_priv)) {
@@ -3176,8 +3257,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
                            u8 req_slices)
 {
-       u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-       u32 val;
+       const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        bool ret;
 
        if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3268,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
        if (req_slices == hw_enabled_slices || req_slices == 0)
                return;
 
-       val = I915_READ(DBUF_CTL_S2);
        if (req_slices > hw_enabled_slices)
                ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
        else
@@ -3240,18 +3319,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
        I915_WRITE(MBUS_ABOX_CTL, val);
 }
 
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+                                     bool enable)
+{
+       i915_reg_t reg;
+       u32 reset_bits, val;
+
+       if (IS_IVYBRIDGE(dev_priv)) {
+               reg = GEN7_MSG_CTL;
+               reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+       } else {
+               reg = HSW_NDE_RSTWRN_OPT;
+               reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+       }
+
+       val = I915_READ(reg);
+
+       if (enable)
+               val |= reset_bits;
+       else
+               val &= ~reset_bits;
+
+       I915_WRITE(reg, val);
+}
+
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
                                   bool resume)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       uint32_t val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
        /* enable PCH reset handshake */
-       val = I915_READ(HSW_NDE_RSTWRN_OPT);
-       I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
        /* enable PG1 and Misc I/O */
        mutex_lock(&power_domains->lock);
@@ -3307,7 +3408,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       uint32_t val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3317,9 +3417,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
         * Move the handshake programming to initialization sequence.
         * Previously was left up to BIOS.
         */
-       val = I915_READ(HSW_NDE_RSTWRN_OPT);
-       val &= ~RESET_PCH_HANDSHAKE_ENABLE;
-       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+       intel_pch_reset_handshake(dev_priv, false);
 
        /* Enable PG1 */
        mutex_lock(&power_domains->lock);
@@ -3365,101 +3463,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
        usleep_range(10, 30);           /* 10 us delay per Bspec */
 }
 
-enum {
-       PROCMON_0_85V_DOT_0,
-       PROCMON_0_95V_DOT_0,
-       PROCMON_0_95V_DOT_1,
-       PROCMON_1_05V_DOT_0,
-       PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
-       u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
-       [PROCMON_0_85V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
-       [PROCMON_0_95V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
-       [PROCMON_0_95V_DOT_1] =
-               { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
-       [PROCMON_1_05V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
-       [PROCMON_1_05V_DOT_1] =
-               { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
-                                      enum port port)
-{
-       const struct cnl_procmon *procmon;
-       u32 val;
-
-       val = I915_READ(ICL_PORT_COMP_DW3(port));
-       switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
-       default:
-               MISSING_CASE(val);
-               /* fall through */
-       case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
-               break;
-       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
-               break;
-       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
-               break;
-       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
-               break;
-       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
-               break;
-       }
-
-       val = I915_READ(ICL_PORT_COMP_DW1(port));
-       val &= ~((0xff << 16) | 0xff);
-       val |= procmon->dw1;
-       I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
-       I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
-       I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       u32 val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
        /* 1. Enable PCH Reset Handshake */
-       val = I915_READ(HSW_NDE_RSTWRN_OPT);
-       val |= RESET_PCH_HANDSHAKE_ENABLE;
-       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
-       /* 2. Enable Comp */
-       val = I915_READ(CHICKEN_MISC_2);
-       val &= ~CNL_COMP_PWR_DOWN;
-       I915_WRITE(CHICKEN_MISC_2, val);
-
-       /* Dummy PORT_A to get the correct CNL register from the ICL macro */
-       cnl_set_procmon_ref_values(dev_priv, PORT_A);
-
-       val = I915_READ(CNL_PORT_COMP_DW0);
-       val |= COMP_INIT;
-       I915_WRITE(CNL_PORT_COMP_DW0, val);
-
-       /* 3. */
-       val = I915_READ(CNL_PORT_CL1CM_DW5);
-       val |= CL_POWER_DOWN_ENABLE;
-       I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+       /* 2-3. */
+       cnl_combo_phys_init(dev_priv);
 
        /*
         * 4. Enable Power Well 1 (PG1).
@@ -3484,7 +3499,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       u32 val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3508,44 +3522,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 
        usleep_range(10, 30);           /* 10 us delay per Bspec */
 
-       /* 5. Disable Comp */
-       val = I915_READ(CHICKEN_MISC_2);
-       val |= CNL_COMP_PWR_DOWN;
-       I915_WRITE(CHICKEN_MISC_2, val);
+       /* 5. */
+       cnl_combo_phys_uninit(dev_priv);
 }
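
Steps 2-3 and 5 of the CNL sequence are factored out into
cnl_combo_phys_init()/cnl_combo_phys_uninit(). A sketch of the uninit side,
assuming it simply re-hosts the "Disable Comp" write removed above:

        static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
        {
                u32 val = I915_READ(CHICKEN_MISC_2);

                val |= CNL_COMP_PWR_DOWN;
                I915_WRITE(CHICKEN_MISC_2, val);
        }
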
 
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
-                                 bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+                          bool resume)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       enum port port;
-       u32 val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
        /* 1. Enable PCH reset handshake. */
-       val = I915_READ(HSW_NDE_RSTWRN_OPT);
-       val |= RESET_PCH_HANDSHAKE_ENABLE;
-       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
-       for (port = PORT_A; port <= PORT_B; port++) {
-               /* 2. Enable DDI combo PHY comp. */
-               val = I915_READ(ICL_PHY_MISC(port));
-               val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-               I915_WRITE(ICL_PHY_MISC(port), val);
-
-               cnl_set_procmon_ref_values(dev_priv, port);
-
-               val = I915_READ(ICL_PORT_COMP_DW0(port));
-               val |= COMP_INIT;
-               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
-               /* 3. Set power down enable. */
-               val = I915_READ(ICL_PORT_CL_DW5(port));
-               val |= CL_POWER_DOWN_ENABLE;
-               I915_WRITE(ICL_PORT_CL_DW5(port), val);
-       }
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+       /* 2-3. */
+       icl_combo_phys_init(dev_priv);
 
        /*
         * 4. Enable Power Well 1 (PG1).
@@ -3569,12 +3562,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
                intel_csr_load_program(dev_priv);
 }
 
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
-       enum port port;
-       u32 val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3596,12 +3587,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
        intel_power_well_disable(dev_priv, well);
        mutex_unlock(&power_domains->lock);
 
-       /* 5. Disable Comp */
-       for (port = PORT_A; port <= PORT_B; port++) {
-               val = I915_READ(ICL_PHY_MISC(port));
-               val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-               I915_WRITE(ICL_PHY_MISC(port), val);
-       }
+       /* 5. */
+       icl_combo_phys_uninit(dev_priv);
 }
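
Likewise for ICL, where the comp disable walks both combo PHY ports. A sketch
assuming icl_combo_phys_uninit() mirrors the loop it replaces:

        static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
        {
                enum port port;
                u32 val;

                for (port = PORT_A; port <= PORT_B; port++) {
                        val = I915_READ(ICL_PHY_MISC(port));
                        val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
                        I915_WRITE(ICL_PHY_MISC(port), val);
                }
        }
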
 
 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3759,7 +3746,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
                mutex_lock(&power_domains->lock);
                vlv_cmnlane_wa(dev_priv);
                mutex_unlock(&power_domains->lock);
-       }
+       } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+               intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
        /*
         * Keep all power wells enabled for any dependent HW access during
@@ -3953,14 +3941,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
                int domains_count;
                bool enabled;
 
-               /*
-                * Power wells not belonging to any domain (like the MISC_IO
-                * and PW1 power wells) are under FW control, so ignore them,
-                * since their state can change asynchronously.
-                */
-               if (!power_well->desc->domains)
-                       continue;
-
                enabled = power_well->desc->ops->is_enabled(dev_priv,
                                                            power_well);
                if ((power_well->count || power_well->desc->always_on) !=
index 701372e512a80663c75e42484663d6184f976f38..5805ec1aba122495736167c0f66ee86cd5da249d 100644 (file)
@@ -105,11 +105,6 @@ struct intel_sdvo {
        bool has_hdmi_audio;
        bool rgb_quant_range_selectable;
 
-       /**
-        * This is sdvo fixed pannel mode pointer
-        */
-       struct drm_display_mode *sdvo_lvds_fixed_mode;
-
        /* DDC bus used by this SDVO encoder */
        uint8_t ddc_bus;
 
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
        args.height = height;
        args.interlace = 0;
 
-       if (IS_LVDS(intel_sdvo_connector) &&
-          (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
-           intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
-               args.scaled = 1;
+       if (IS_LVDS(intel_sdvo_connector)) {
+               const struct drm_display_mode *fixed_mode =
+                       intel_sdvo_connector->base.panel.fixed_mode;
+
+               if (fixed_mode->hdisplay != width ||
+                   fixed_mode->vdisplay != height)
+                       args.scaled = 1;
+       }
 
        return intel_sdvo_set_value(intel_sdvo,
                                    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
        pipe_config->pipe_bpp = 8*3;
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
        if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
                pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                pipe_config->sdvo_tv_clock = true;
        } else if (IS_LVDS(intel_sdvo_connector)) {
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
-                                                            intel_sdvo->sdvo_lvds_fixed_mode))
+                                                            intel_sdvo_connector->base.panel.fixed_mode))
                        return false;
 
                (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
        /* lvds has a special fixed output timing. */
        if (IS_LVDS(intel_sdvo_connector))
                intel_sdvo_get_dtd_from_mode(&output_dtd,
-                                            intel_sdvo->sdvo_lvds_fixed_mode);
+                                            intel_sdvo_connector->base.panel.fixed_mode);
        else
                intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
        if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
                return MODE_CLOCK_HIGH;
 
        if (IS_LVDS(intel_sdvo_connector)) {
-               if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+               const struct drm_display_mode *fixed_mode =
+                       intel_sdvo_connector->base.panel.fixed_mode;
+
+               if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
 
-               if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+               if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;
        }
 
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
        return !list_empty(&connector->probed_modes);
 }
 
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
-       struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
-       drm_connector_cleanup(connector);
-       kfree(intel_sdvo_connector);
-}
-
 static int
 intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
                                         const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
        .atomic_set_property = intel_sdvo_connector_atomic_set_property,
        .late_register = intel_sdvo_connector_register,
        .early_unregister = intel_sdvo_connector_unregister,
-       .destroy = intel_sdvo_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
 };
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
        struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
-       if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
-               drm_mode_destroy(encoder->dev,
-                                intel_sdvo->sdvo_lvds_fixed_mode);
-
        i2c_del_adapter(&intel_sdvo->ddc);
        intel_encoder_destroy(encoder);
 }
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
        return true;
 
 err:
-       intel_sdvo_destroy(connector);
+       intel_connector_destroy(connector);
        return false;
 }
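
intel_sdvo_destroy() becomes redundant once the fixed mode lives in
intel_connector->panel: the shared intel_connector_destroy() can tear
everything down. A sketch of what that shared destructor plausibly does, i.e.
the removed cleanup/kfree pair plus panel teardown; the intel_panel_fini()
call is an assumption here:

        void intel_connector_destroy(struct drm_connector *connector)
        {
                struct intel_connector *intel_connector =
                        to_intel_connector(connector);

                /* Assumed: releases panel state, including fixed_mode */
                intel_panel_fini(&intel_connector->panel);

                drm_connector_cleanup(connector);
                kfree(intel_connector);
        }
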
 
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
        list_for_each_entry(mode, &connector->probed_modes, head) {
                if (mode->type & DRM_MODE_TYPE_PREFERRED) {
-                       intel_sdvo->sdvo_lvds_fixed_mode =
+                       struct drm_display_mode *fixed_mode =
                                drm_mode_duplicate(connector->dev, mode);
+
+                       intel_panel_init(&intel_connector->panel,
+                                        fixed_mode, NULL);
                        break;
                }
        }
 
-       if (!intel_sdvo->sdvo_lvds_fixed_mode)
+       if (!intel_connector->panel.fixed_mode)
                goto err;
 
        return true;
 
 err:
-       intel_sdvo_destroy(connector);
+       intel_connector_destroy(connector);
        return false;
 }
 
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
                                 &dev->mode_config.connector_list, head) {
                if (intel_attached_encoder(connector) == &intel_sdvo->base) {
                        drm_connector_unregister(connector);
-                       intel_sdvo_destroy(connector);
+                       intel_connector_destroy(connector);
                }
        }
 }
index 5fd2f7bf3927191a22cdeba959c5fd7c4f6f512a..abe193815cccfde71d19d7d71e7ba1dd94fbd6c2 100644 (file)
@@ -40,6 +40,7 @@
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
 
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
        src->y2 = (src_y + src_h) << 16;
 
        if (fb->format->is_yuv &&
-           fb->format->format != DRM_FORMAT_NV12 &&
            (src_x & 1 || src_w & 1)) {
                DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
                              src_x, src_w);
                return -EINVAL;
        }
 
+       if (fb->format->is_yuv &&
+           fb->format->num_planes > 1 &&
+           (src_y & 1 || src_h & 1)) {
+               DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+                             src_y, src_h);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
-unsigned int
+static unsigned int
 skl_plane_max_stride(struct intel_plane *plane,
                     u32 pixel_format, u64 modifier,
                     unsigned int rotation)
@@ -302,35 +310,201 @@ skl_plane_max_stride(struct intel_plane *plane,
                return min(8192 * cpp, 32768);
 }
 
-void
-skl_update_plane(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
+static void
+skl_program_scaler(struct intel_plane *plane,
+                  const struct intel_crtc_state *crtc_state,
+                  const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       int scaler_id = plane_state->scaler_id;
+       const struct intel_scaler *scaler =
+               &crtc_state->scaler_state.scalers[scaler_id];
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       u16 y_hphase, uv_rgb_hphase;
+       u16 y_vphase, uv_rgb_vphase;
+       int hscale, vscale;
+
+       hscale = drm_rect_calc_hscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+       vscale = drm_rect_calc_vscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+
+       /* TODO: handle sub-pixel coordinates */
+       if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+           !icl_is_hdr_plane(plane)) {
+               y_hphase = skl_scaler_calc_phase(1, hscale, false);
+               y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+               /* MPEG2 chroma siting convention */
+               uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+               uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+       } else {
+               /* not used */
+               y_hphase = 0;
+               y_vphase = 0;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+       }
+
+       I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+                     PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+       I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+       I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+       I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+       I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
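
Both rectangles come straight from the plane state: the source is in 16.16
fixed point, the destination in integer pixels, so the helpers return 16.16
scale factors. A quick worked example with hypothetical rectangles (a 2x
downscale yields 2 << 16 == 0x20000):

        struct drm_rect src = {
                .x1 = 0, .y1 = 0, .x2 = 1920 << 16, .y2 = 1080 << 16,
        };
        struct drm_rect dst = { .x1 = 0, .y1 = 0, .x2 = 960, .y2 = 540 };
        int hscale = drm_rect_calc_hscale(&src, &dst, 0, INT_MAX); /* 0x20000 */
        int vscale = drm_rect_calc_vscale(&src, &dst, 0, INT_MAX); /* 0x20000 */
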
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI           0x1800
+#define PREOFF_YUV_TO_RGB_ME           0x1F00
+#define PREOFF_YUV_TO_RGB_LO           0x1800
+
+#define  ROFF(x)          (((x) & 0xffff) << 16)
+#define  GOFF(x)          (((x) & 0xffff) << 0)
+#define  BOFF(x)          (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
+                           const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       enum plane_id plane_id = plane->id;
+
+       static const u16 input_csc_matrix[][9] = {
+               /*
+                * BT.601 full range YCbCr -> full range RGB
+                * The matrix required is:
+                * [1.000, 0.000, 1.371,
+                *  1.000, -0.336, -0.698,
+                *  1.000, 1.732, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT601] = {
+                       0x7AF8, 0x7800, 0x0,
+                       0x8B28, 0x7800, 0x9AC0,
+                       0x0, 0x7800, 0x7DD8,
+               },
+               /*
+                * BT.709 full range YCbCr -> full range RGB
+                * The matrix required is:
+                * [1.000, 0.000, 1.574,
+                *  1.000, -0.187, -0.468,
+                *  1.000, 1.855, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT709] = {
+                       0x7C98, 0x7800, 0x0,
+                       0x9EF8, 0x7800, 0xABF8,
+                       0x0, 0x7800,  0x7ED8,
+               },
+       };
+
+       /* Matrix for Limited Range to Full Range Conversion */
+       static const u16 input_csc_matrix_lr[][9] = {
+               /*
+                * BT.601 Limited range YCbCr -> full range RGB
+                * The matrix required is:
+                * [1.164384, 0.000, 1.596370,
+                *  1.138393, -0.382500, -0.794598,
+                *  1.138393, 1.971696, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT601] = {
+                       0x7CC8, 0x7950, 0x0,
+                       0x8CB8, 0x7918, 0x9C40,
+                       0x0, 0x7918, 0x7FC8,
+               },
+               /*
+                * BT.709 Limited range YCbCr -> full range RGB
+                * The matrix required is:
+                * [1.164, 0.000, 1.833671,
+                *  1.138393, -0.213249, -0.532909,
+                *  1.138393, 2.112402, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT709] = {
+                       0x7EA8, 0x7950, 0x0,
+                       0x8888, 0x7918, 0xADA8,
+                       0x0, 0x7918,  0x6870,
+               },
+       };
+       const u16 *csc;
+
+       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+               csc = input_csc_matrix[plane_state->base.color_encoding];
+       else
+               csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+                     GOFF(csc[1]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+                     GOFF(csc[4]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+                     GOFF(csc[7]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+                     PREOFF_YUV_TO_RGB_HI);
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+                     PREOFF_YUV_TO_RGB_ME);
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+                     PREOFF_YUV_TO_RGB_LO);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
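
Each matrix row spans one and a half registers: the R and G coefficients
share a DW via ROFF()/GOFF() and B takes the next one via BOFF(). The six
writes above could equally be expressed with a hypothetical per-row helper
(not part of the patch), called as icl_write_input_csc_row(dev_priv, pipe,
plane_id, i, &csc[3 * i]) for i = 0..2:

        static void icl_write_input_csc_row(struct drm_i915_private *dev_priv,
                                            enum pipe pipe,
                                            enum plane_id plane_id,
                                            int row, const u16 *coeff)
        {
                I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, row * 2),
                              ROFF(coeff[0]) | GOFF(coeff[1]));
                I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, row * 2 + 1),
                              BOFF(coeff[2]));
        }
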
+
+static void
+skl_program_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state,
+                 const struct intel_plane_state *plane_state,
+                 int color_plane, bool slave, u32 plane_ctl)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
        enum plane_id plane_id = plane->id;
        enum pipe pipe = plane->pipe;
-       u32 plane_ctl = plane_state->ctl;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 surf_addr = plane_state->color_plane[0].offset;
-       u32 stride = skl_plane_stride(plane_state, 0);
+       u32 surf_addr = plane_state->color_plane[color_plane].offset;
+       u32 stride = skl_plane_stride(plane_state, color_plane);
        u32 aux_stride = skl_plane_stride(plane_state, 1);
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
-       uint32_t x = plane_state->color_plane[0].x;
-       uint32_t y = plane_state->color_plane[0].y;
+       uint32_t x = plane_state->color_plane[color_plane].x;
+       uint32_t y = plane_state->color_plane[color_plane].y;
        uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
        uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       struct intel_plane *linked = plane_state->linked_plane;
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       u8 alpha = plane_state->base.alpha >> 8;
        unsigned long irqflags;
+       u32 keymsk, keymax;
 
        /* Sizes are 0 based */
        src_w--;
        src_h--;
-       crtc_w--;
-       crtc_h--;
+
+       keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+       keymsk = key->channel_mask & 0x3ffffff;
+       if (alpha < 0xff)
+               keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+       /* The scaler will handle the output position */
+       if (plane_state->scaler_id >= 0) {
+               crtc_x = 0;
+               crtc_y = 0;
+       }
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
@@ -338,71 +512,83 @@ skl_update_plane(struct intel_plane *plane,
                I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
                              plane_state->color_ctl);
 
-       if (key->flags) {
-               I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-               I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
-               I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
-       }
+       if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+               icl_program_input_csc_coeff(crtc_state, plane_state);
+
+       I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+       I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+       I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
 
        I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
        I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
        I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
        I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
                      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
-       I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-                     (plane_state->color_plane[1].y << 16) |
-                     plane_state->color_plane[1].x);
 
-       /* program plane scaler */
-       if (plane_state->scaler_id >= 0) {
-               int scaler_id = plane_state->scaler_id;
-               const struct intel_scaler *scaler =
-                       &crtc_state->scaler_state.scalers[scaler_id];
-               u16 y_hphase, uv_rgb_hphase;
-               u16 y_vphase, uv_rgb_vphase;
-
-               /* TODO: handle sub-pixel coordinates */
-               if (fb->format->format == DRM_FORMAT_NV12) {
-                       y_hphase = skl_scaler_calc_phase(1, false);
-                       y_vphase = skl_scaler_calc_phase(1, false);
-
-                       /* MPEG2 chroma siting convention */
-                       uv_rgb_hphase = skl_scaler_calc_phase(2, true);
-                       uv_rgb_vphase = skl_scaler_calc_phase(2, false);
-               } else {
-                       /* not used */
-                       y_hphase = 0;
-                       y_vphase = 0;
-
-                       uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-                       uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+       if (INTEL_GEN(dev_priv) < 11)
+               I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+                             (plane_state->color_plane[1].y << 16) |
+                              plane_state->color_plane[1].x);
+
+       if (icl_is_hdr_plane(plane)) {
+               u32 cus_ctl = 0;
+
+               if (linked) {
+                       /* Enable and use MPEG-2 chroma siting */
+                       cus_ctl = PLANE_CUS_ENABLE |
+                               PLANE_CUS_HPHASE_0 |
+                               PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+                               PLANE_CUS_VPHASE_0_25;
+
+                       if (linked->id == PLANE_SPRITE5)
+                               cus_ctl |= PLANE_CUS_PLANE_7;
+                       else if (linked->id == PLANE_SPRITE4)
+                               cus_ctl |= PLANE_CUS_PLANE_6;
+                       else
+                               MISSING_CASE(linked->id);
                }
 
-               I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
-                             PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
-               I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-               I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
-                             PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-               I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
-                             PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-               I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-               I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
-                             ((crtc_w + 1) << 16)|(crtc_h + 1));
-
-               I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
-       } else {
-               I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+               I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
        }
 
+       if (!slave && plane_state->scaler_id >= 0)
+               skl_program_scaler(plane, crtc_state, plane_state);
+
+       I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+
        I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
        I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
                      intel_plane_ggtt_offset(plane_state) + surf_addr);
-       POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
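
The DRM per-plane alpha property is 16 bits wide while the hardware KEYMAX
alpha field is 8, hence the >> 8 above. A worked example at 50% opacity
(illustrative values):

        u16 prop = 0x8000;              /* plane_state->base.alpha */
        u8 alpha = prop >> 8;           /* 0x80 */
        u32 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
        u32 keymsk = key->channel_mask & 0x3ffffff;

        if (alpha < 0xff)               /* true for 0x80 */
                keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
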
 
-void
+static void
+skl_update_plane(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       int color_plane = 0;
+
+       if (plane_state->linked_plane) {
+               /* Program the UV plane */
+               color_plane = 1;
+       }
+
+       skl_program_plane(plane, crtc_state, plane_state,
+                         color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       skl_program_plane(plane, crtc_state, plane_state, 0, true,
+                         plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
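
With ICL plane linking the NV12 master scans the UV data (color plane 1)
through its normal update hook, while the linked Y plane is programmed via
update_slave() with the master's state. A sketch of the dispatch, assuming
the caller looks roughly like this:

        static void icl_commit_nv12_planes(struct intel_plane *master,
                                           const struct intel_crtc_state *crtc_state,
                                           const struct intel_plane_state *plane_state)
        {
                struct intel_plane *linked = plane_state->linked_plane;

                master->update_plane(master, crtc_state, plane_state); /* UV */
                if (linked)
                        linked->update_slave(linked, crtc_state,
                                             plane_state); /* Y */
        }
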
+
+static void
 skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -413,14 +599,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
        I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-
        I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-       POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-bool
+static bool
 skl_plane_get_hw_state(struct intel_plane *plane,
                       enum pipe *pipe)
 {
@@ -613,7 +797,6 @@ vlv_update_plane(struct intel_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        enum plane_id plane_id = plane->id;
        u32 sprctl = plane_state->ctl;
@@ -650,10 +833,8 @@ vlv_update_plane(struct intel_plane *plane,
                      plane_state->color_plane[0].stride);
        I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-               I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
-       else
-               I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+       I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+       I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
 
        I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
 
@@ -661,7 +842,6 @@ vlv_update_plane(struct intel_plane *plane,
        I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
        I915_WRITE_FW(SPSURF(pipe, plane_id),
                      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-       POSTING_READ_FW(SPSURF(pipe, plane_id));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -677,9 +857,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
        I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
        I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
-       POSTING_READ_FW(SPSURF(pipe, plane_id));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -774,7 +952,6 @@ ivb_update_plane(struct intel_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        u32 sprctl = plane_state->ctl, sprscale = 0;
        u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -814,12 +991,12 @@ ivb_update_plane(struct intel_plane *plane,
 
        /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
         * register */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
-       else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+       } else {
                I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
-       else
                I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+       }
 
        I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
        if (IS_IVYBRIDGE(dev_priv))
@@ -827,7 +1004,6 @@ ivb_update_plane(struct intel_plane *plane,
        I915_WRITE_FW(SPRCTL(pipe), sprctl);
        I915_WRITE_FW(SPRSURF(pipe),
                      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-       POSTING_READ_FW(SPRSURF(pipe));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -845,9 +1021,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        /* Can't leave the scaler enabled... */
        if (IS_IVYBRIDGE(dev_priv))
                I915_WRITE_FW(SPRSCALE(pipe), 0);
-
        I915_WRITE_FW(SPRSURF(pipe), 0);
-       POSTING_READ_FW(SPRSURF(pipe));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -946,7 +1120,6 @@ g4x_update_plane(struct intel_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        u32 dvscntr = plane_state->ctl, dvsscale = 0;
        u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -984,17 +1157,14 @@ g4x_update_plane(struct intel_plane *plane,
        I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
        I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-               I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
-       else
-               I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+       I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+       I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
 
        I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
        I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
        I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
        I915_WRITE_FW(DVSSURF(pipe),
                      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
-       POSTING_READ_FW(DVSSURF(pipe));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1011,9 +1181,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        I915_WRITE_FW(DVSCNTR(pipe), 0);
        /* Disable the scaler */
        I915_WRITE_FW(DVSSCALE(pipe), 0);
-
        I915_WRITE_FW(DVSSURF(pipe), 0);
-       POSTING_READ_FW(DVSSURF(pipe));
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1039,6 +1207,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
        return ret;
 }
 
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+       if (!fb)
+               return false;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_C8:
+               return false;
+       default:
+               return true;
+       }
+}
+
 static int
 g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *plane_state)
@@ -1106,18 +1287,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 {
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       int max_scale, min_scale;
+       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        int ret;
 
-       if (INTEL_GEN(dev_priv) < 7) {
-               min_scale = 1;
-               max_scale = 16 << 16;
-       } else if (IS_IVYBRIDGE(dev_priv)) {
-               min_scale = 1;
-               max_scale = 2 << 16;
-       } else {
-               min_scale = DRM_PLANE_HELPER_NO_SCALING;
-               max_scale = DRM_PLANE_HELPER_NO_SCALING;
+       if (intel_fb_scalable(plane_state->base.fb)) {
+               if (INTEL_GEN(dev_priv) < 7) {
+                       min_scale = 1;
+                       max_scale = 16 << 16;
+               } else if (IS_IVYBRIDGE(dev_priv)) {
+                       min_scale = 1;
+                       max_scale = 2 << 16;
+               }
        }
 
        ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1204,6 +1385,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
                              const struct intel_plane_state *plane_state)
 {
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        struct drm_format_name_buf format_name;
@@ -1232,13 +1415,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
                }
 
                /*
-                * 90/270 is not allowed with RGB64 16:16:16:16,
-                * RGB 16-bit 5:6:5, and Indexed 8-bit.
-                * TBD: Add RGB64 case once its added in supported format list.
+                * 90/270 is not allowed with RGB64 16:16:16:16 and
+                * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11
+                * onwards.
+                * TBD: Add RGB64 case once it's added to the supported
+                * format list.
                 */
                switch (fb->format->format) {
-               case DRM_FORMAT_C8:
                case DRM_FORMAT_RGB565:
+                       if (INTEL_GEN(dev_priv) >= 11)
+                               break;
+                       /* fall through */
+               case DRM_FORMAT_C8:
                        DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
                                      drm_get_format_name(fb->format->format,
                                                          &format_name));
@@ -1292,12 +1479,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
        return 0;
 }
 
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-                   struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+       /* Display WA #1106 */
+       if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+           (rotation == DRM_MODE_ROTATE_270 ||
+            rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+               DRM_DEBUG_KMS("src width must be a multiple of 4 for rotated NV12\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
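
The src_w & 3 test rejects widths that are not a multiple of 4 whenever one
of the 90/270-class rotations is active. A quick worked check (illustrative
widths):

        /* 1922 & 3 == 2  ->  -EINVAL for ROTATE_270 or REFLECT_X | ROTATE_90 */
        /* 1920 & 3 == 0  ->  accepted */
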
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+                          struct intel_plane_state *plane_state)
 {
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       int max_scale, min_scale;
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        int ret;
 
        ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1305,15 +1511,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
                return ret;
 
        /* use scaler when colorkey is not required */
-       if (!plane_state->ckey.flags) {
-               const struct drm_framebuffer *fb = plane_state->base.fb;
-
+       if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
                min_scale = 1;
-               max_scale = skl_max_scale(crtc_state,
-                                         fb ? fb->format->format : 0);
-       } else {
-               min_scale = DRM_PLANE_HELPER_NO_SCALING;
-               max_scale = DRM_PLANE_HELPER_NO_SCALING;
+               max_scale = skl_max_scale(crtc_state, fb->format->format);
        }
 
        ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1334,10 +1534,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
        if (ret)
                return ret;
 
+       ret = skl_plane_check_nv12_rotation(plane_state);
+       if (ret)
+               return ret;
+
        ret = skl_check_plane_surface(plane_state);
        if (ret)
                return ret;
 
+       /* HW only has 8 bits of alpha precision; disable the plane if invisible */
+       if (!(plane_state->base.alpha >> 8))
+               plane_state->base.visible = false;
+
        plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
 
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1502,24 +1710,30 @@ static const uint32_t vlv_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+       DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+       DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
@@ -1724,8 +1938,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
        .format_mod_supported = skl_plane_format_mod_supported,
 };
 
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-                      enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+                             enum pipe pipe, enum plane_id plane_id)
+{
+       if (!HAS_FBC(dev_priv))
+               return false;
+
+       return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+                                enum pipe pipe, enum plane_id plane_id)
+{
+       if (INTEL_GEN(dev_priv) >= 11)
+               return plane_id <= PLANE_SPRITE3;
+
+       /* Display WA #0870: skl, bxt */
+       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+               return false;
+
+       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+               return false;
+
+       if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+               return false;
+
+       return true;
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+                             enum pipe pipe, enum plane_id plane_id)
 {
        if (plane_id == PLANE_CURSOR)
                return false;
@@ -1742,109 +1984,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 }
 
 struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+                          enum pipe pipe, enum plane_id plane_id)
 {
-       struct intel_plane *intel_plane = NULL;
-       struct intel_plane_state *state = NULL;
-       const struct drm_plane_funcs *plane_funcs;
-       unsigned long possible_crtcs;
-       const uint32_t *plane_formats;
-       const uint64_t *modifiers;
+       struct intel_plane *plane;
+       enum drm_plane_type plane_type;
        unsigned int supported_rotations;
-       int num_plane_formats;
+       unsigned int possible_crtcs;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
        int ret;
 
-       intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
-       if (!intel_plane) {
-               ret = -ENOMEM;
-               goto fail;
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
+
+       plane->pipe = pipe;
+       plane->id = plane_id;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+       plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+       if (plane->has_fbc) {
+               struct intel_fbc *fbc = &dev_priv->fbc;
+
+               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }
 
-       state = intel_create_plane_state(&intel_plane->base);
-       if (!state) {
-               ret = -ENOMEM;
-               goto fail;
+       plane->max_stride = skl_plane_max_stride;
+       plane->update_plane = skl_update_plane;
+       plane->disable_plane = skl_disable_plane;
+       plane->get_hw_state = skl_plane_get_hw_state;
+       plane->check_plane = skl_plane_check;
+       if (icl_is_nv12_y_plane(plane_id))
+               plane->update_slave = icl_update_slave;
+
+       if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+               formats = skl_planar_formats;
+               num_formats = ARRAY_SIZE(skl_planar_formats);
+       } else {
+               formats = skl_plane_formats;
+               num_formats = ARRAY_SIZE(skl_plane_formats);
        }
-       intel_plane->base.state = &state->base;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               state->scaler_id = -1;
+       plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+       if (plane->has_ccs)
+               modifiers = skl_plane_format_modifiers_ccs;
+       else
+               modifiers = skl_plane_format_modifiers_noccs;
 
-               intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-                                                        PLANE_SPRITE0 + plane);
+       if (plane_id == PLANE_PRIMARY)
+               plane_type = DRM_PLANE_TYPE_PRIMARY;
+       else
+               plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-               intel_plane->max_stride = skl_plane_max_stride;
-               intel_plane->update_plane = skl_update_plane;
-               intel_plane->disable_plane = skl_disable_plane;
-               intel_plane->get_hw_state = skl_plane_get_hw_state;
-               intel_plane->check_plane = skl_plane_check;
+       possible_crtcs = BIT(pipe);
 
-               if (skl_plane_has_planar(dev_priv, pipe,
-                                        PLANE_SPRITE0 + plane)) {
-                       plane_formats = skl_planar_formats;
-                       num_plane_formats = ARRAY_SIZE(skl_planar_formats);
-               } else {
-                       plane_formats = skl_plane_formats;
-                       num_plane_formats = ARRAY_SIZE(skl_plane_formats);
-               }
+       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                      possible_crtcs, &skl_plane_funcs,
+                                      formats, num_formats, modifiers,
+                                      plane_type,
+                                      "plane %d%c", plane_id + 1,
+                                      pipe_name(pipe));
+       if (ret)
+               goto fail;
 
-               if (intel_plane->has_ccs)
-                       modifiers = skl_plane_format_modifiers_ccs;
-               else
-                       modifiers = skl_plane_format_modifiers_noccs;
-
-               plane_funcs = &skl_plane_funcs;
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               intel_plane->max_stride = i9xx_plane_max_stride;
-               intel_plane->update_plane = vlv_update_plane;
-               intel_plane->disable_plane = vlv_disable_plane;
-               intel_plane->get_hw_state = vlv_plane_get_hw_state;
-               intel_plane->check_plane = vlv_sprite_check;
-
-               plane_formats = vlv_plane_formats;
-               num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+       supported_rotations =
+               DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+               DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+       if (INTEL_GEN(dev_priv) >= 10)
+               supported_rotations |= DRM_MODE_REFLECT_X;
+
+       drm_plane_create_rotation_property(&plane->base,
+                                          DRM_MODE_ROTATE_0,
+                                          supported_rotations);
+
+       drm_plane_create_color_properties(&plane->base,
+                                         BIT(DRM_COLOR_YCBCR_BT601) |
+                                         BIT(DRM_COLOR_YCBCR_BT709),
+                                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+                                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+                                         DRM_COLOR_YCBCR_BT709,
+                                         DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+       drm_plane_create_alpha_property(&plane->base);
+       drm_plane_create_blend_mode_property(&plane->base,
+                                            BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                            BIT(DRM_MODE_BLEND_PREMULTI) |
+                                            BIT(DRM_MODE_BLEND_COVERAGE));
+
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+       return plane;
+
+fail:
+       intel_plane_free(plane);
+
+       return ERR_PTR(ret);
+}
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+                         enum pipe pipe, int sprite)
+{
+       struct intel_plane *plane;
+       const struct drm_plane_funcs *plane_funcs;
+       unsigned long possible_crtcs;
+       unsigned int supported_rotations;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
+       int ret;
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               return skl_universal_plane_create(dev_priv, pipe,
+                                                 PLANE_SPRITE0 + sprite);
+
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
+
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = vlv_update_plane;
+               plane->disable_plane = vlv_disable_plane;
+               plane->get_hw_state = vlv_plane_get_hw_state;
+               plane->check_plane = vlv_sprite_check;
+
+               formats = vlv_plane_formats;
+               num_formats = ARRAY_SIZE(vlv_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
 
                plane_funcs = &vlv_sprite_funcs;
        } else if (INTEL_GEN(dev_priv) >= 7) {
-               intel_plane->max_stride = g4x_sprite_max_stride;
-               intel_plane->update_plane = ivb_update_plane;
-               intel_plane->disable_plane = ivb_disable_plane;
-               intel_plane->get_hw_state = ivb_plane_get_hw_state;
-               intel_plane->check_plane = g4x_sprite_check;
-
-               plane_formats = snb_plane_formats;
-               num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+               plane->max_stride = g4x_sprite_max_stride;
+               plane->update_plane = ivb_update_plane;
+               plane->disable_plane = ivb_disable_plane;
+               plane->get_hw_state = ivb_plane_get_hw_state;
+               plane->check_plane = g4x_sprite_check;
+
+               formats = snb_plane_formats;
+               num_formats = ARRAY_SIZE(snb_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
 
                plane_funcs = &snb_sprite_funcs;
        } else {
-               intel_plane->max_stride = g4x_sprite_max_stride;
-               intel_plane->update_plane = g4x_update_plane;
-               intel_plane->disable_plane = g4x_disable_plane;
-               intel_plane->get_hw_state = g4x_plane_get_hw_state;
-               intel_plane->check_plane = g4x_sprite_check;
+               plane->max_stride = g4x_sprite_max_stride;
+               plane->update_plane = g4x_update_plane;
+               plane->disable_plane = g4x_disable_plane;
+               plane->get_hw_state = g4x_plane_get_hw_state;
+               plane->check_plane = g4x_sprite_check;
 
                modifiers = i9xx_plane_format_modifiers;
                if (IS_GEN6(dev_priv)) {
-                       plane_formats = snb_plane_formats;
-                       num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+                       formats = snb_plane_formats;
+                       num_formats = ARRAY_SIZE(snb_plane_formats);
 
                        plane_funcs = &snb_sprite_funcs;
                } else {
-                       plane_formats = g4x_plane_formats;
-                       num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+                       formats = g4x_plane_formats;
+                       num_formats = ARRAY_SIZE(g4x_plane_formats);
 
                        plane_funcs = &g4x_sprite_funcs;
                }
        }
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-                       DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-       } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
@@ -1853,35 +2159,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        }
 
-       intel_plane->pipe = pipe;
-       intel_plane->i9xx_plane = plane;
-       intel_plane->id = PLANE_SPRITE0 + plane;
-       intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
+       plane->pipe = pipe;
+       plane->id = PLANE_SPRITE0 + sprite;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-       possible_crtcs = (1 << pipe);
+       possible_crtcs = BIT(pipe);
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, plane_funcs,
-                                              plane_formats, num_plane_formats,
-                                              modifiers,
-                                              DRM_PLANE_TYPE_OVERLAY,
-                                              "plane %d%c", plane + 2, pipe_name(pipe));
-       else
-               ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, plane_funcs,
-                                              plane_formats, num_plane_formats,
-                                              modifiers,
-                                              DRM_PLANE_TYPE_OVERLAY,
-                                              "sprite %c", sprite_name(pipe, plane));
+       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                      possible_crtcs, plane_funcs,
+                                      formats, num_formats, modifiers,
+                                      DRM_PLANE_TYPE_OVERLAY,
+                                      "sprite %c", sprite_name(pipe, sprite));
        if (ret)
                goto fail;
 
-       drm_plane_create_rotation_property(&intel_plane->base,
+       drm_plane_create_rotation_property(&plane->base,
                                           DRM_MODE_ROTATE_0,
                                           supported_rotations);
 
-       drm_plane_create_color_properties(&intel_plane->base,
+       drm_plane_create_color_properties(&plane->base,
                                          BIT(DRM_COLOR_YCBCR_BT601) |
                                          BIT(DRM_COLOR_YCBCR_BT709),
                                          BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1889,13 +2185,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                                          DRM_COLOR_YCBCR_BT709,
                                          DRM_COLOR_YCBCR_LIMITED_RANGE);
 
-       drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
 
-       return intel_plane;
+       return plane;
 
 fail:
-       kfree(state);
-       kfree(intel_plane);
+       intel_plane_free(plane);
 
        return ERR_PTR(ret);
 }
index b5b04cb892e945b747f20702b5740b4af572644f..860f306a23bafbda312d63bafb2039ce67b91069 100644 (file)
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        adjusted_mode->crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
        return count;
 }
 
-static void
-intel_tv_destroy(struct drm_connector *connector)
-{
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_tv_destroy,
+       .destroy = intel_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
index b1b3e81b6e241568e4d54986062fb8a2ea7a5b7d..b34c318b238dad37a027aad52b5a920e2c73f3dc 100644 (file)
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
 
                intel_guc_init_params(guc);
                ret = intel_guc_fw_upload(guc);
-               if (ret == 0 || ret != -EAGAIN)
+               if (ret == 0 || ret != -ETIMEDOUT)
                        break;
 
                DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
index 87910aa8326760eb21172b685cd24ac6308032c4..0e3bd580e267ffe569c9897cacaf1ce807cb501e 100644 (file)
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
        return uc_fw->path != NULL;
 }
 
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+       return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+}
+
 static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
 {
-       if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+       if (intel_uc_fw_is_loaded(uc_fw))
                uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
 }
 
index 3ad302c66254bb0b74703fa44df4e305a5013a33..9289515108c3182a6b0aa363639a631eed97e7a0 100644 (file)
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
                                       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
                                       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
                }
-       } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+       } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_fallback;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
index bba98cf83cbd9d1c0348e42acb5f8fba98a106dc..bf3662ad5fed1ebcb3a49a870ccdd2de4de3fbeb 100644 (file)
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
        ICL_DDC_BUS_PORT_4,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
 #define VBT_DP_MAX_LINK_RATE_HBR3      0
 #define VBT_DP_MAX_LINK_RATE_HBR2      1
 #define VBT_DP_MAX_LINK_RATE_HBR       2
index 4bcdeaf8d98fa3de5aec7790971098905b5a688b..ca1f78a42b177cbb90eb04fb44f816a4a96d5a4d 100644 (file)
@@ -823,18 +823,21 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
 
        /* WaInPlaceDecompressionHang:icl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
        /* WaPipelineFlushCoherentLines:icl */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN8_LQSC_FLUSH_COHERENT_LINES);
+       I915_WRITE(GEN8_L3SQCREG4,
+                  I915_READ(GEN8_L3SQCREG4) |
+                  GEN8_LQSC_FLUSH_COHERENT_LINES);
 
        /* Wa_1405543622:icl
         * Formerly known as WaGAPZPriorityScheme
         */
-       I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
-                                 GEN11_ARBITRATION_PRIO_ORDER_MASK);
+       I915_WRITE(GEN8_GARBCNTL,
+                  I915_READ(GEN8_GARBCNTL) |
+                  GEN11_ARBITRATION_PRIO_ORDER_MASK);
 
        /* Wa_1604223664:icl
         * Formerly known as WaL3BankAddressHashing
@@ -854,21 +857,24 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
        /* Wa_1405733216:icl
         * Formerly known as WaDisableCleanEvicts
         */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN11_LQSC_CLEAN_EVICT_DISABLE);
+       I915_WRITE(GEN8_L3SQCREG4,
+                  I915_READ(GEN8_L3SQCREG4) |
+                  GEN11_LQSC_CLEAN_EVICT_DISABLE);
 
        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
-       I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+       I915_WRITE(GEN11_LSN_UNSLCVC,
+                  I915_READ(GEN11_LSN_UNSLCVC) |
+                  GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+                  GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
        /* Wa_220166154:icl
         * Formerly known as WaDisCtxReload
         */
-       I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
-                                          GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+       I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
+                  I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
+                  GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
        /* Wa_1405779004:icl (pre-prod) */
        if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
@@ -905,6 +911,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
        I915_WRITE(GAMT_CHKN_BIT_REG,
                   I915_READ(GAMT_CHKN_BIT_REG) |
                   GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+       /* Wa_1406609255:icl (pre-prod) */
+       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+               I915_WRITE(GEN7_SARCHKMD,
+                          I915_READ(GEN7_SARCHKMD) |
+                          GEN7_DISABLE_DEMAND_PREFETCH |
+                          GEN7_DISABLE_SAMPLER_PREFETCH);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
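Every workaround in this hunk has the same read-modify-write shape: read the register, OR in the chicken bits, write the result back; the reflow just puts each operand on its own line. A hedged standalone sketch of that RMW pattern (read_reg()/write_reg() are hypothetical stand-ins for the I915_READ/I915_WRITE accessors):

#include <stdint.h>

extern uint32_t read_reg(uint32_t reg);			/* assumed MMIO read */
extern void write_reg(uint32_t reg, uint32_t val);	/* assumed MMIO write */

/* Set bits in a register without disturbing its other fields. */
static void rmw_set(uint32_t reg, uint32_t set)
{
	write_reg(reg, read_reg(reg) | set);
}

/* Clear bits: the complementary operation. */
static void rmw_clear(uint32_t reg, uint32_t clear)
{
	write_reg(reg, read_reg(reg) & ~clear);
}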
@@ -941,7 +954,7 @@ struct whitelist {
 
 static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
 {
-       if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+       if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
                return;
 
        w->reg[w->count++] = reg;
@@ -1009,6 +1022,11 @@ static void cnl_whitelist_build(struct whitelist *w)
 
 static void icl_whitelist_build(struct whitelist *w)
 {
+       /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+       whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+       /* WaAllowUMDToModifySamplerMode:icl */
+       whitelist_reg(w, GEN10_SAMPLER_MODE);
 }
 
 static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
index 8d03f64eabd71d449ebedca890135d69dabb1a74..26c065c8d2c0a7e3b550dc96fa94ee611daae296 100644 (file)
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                        err = igt_check_page_sizes(vma);
 
                        if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
-                               pr_err("page_sizes.gtt=%u, expected %lu\n",
+                               pr_err("page_sizes.gtt=%u, expected %llu\n",
                                       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
                                err = -EINVAL;
                        }
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
        n = 0;
        for_each_engine(engine, i915, id) {
                if (!intel_engine_can_store_dword(engine)) {
-                       pr_info("store-dword-imm not supported on engine=%u\n", id);
+                       pr_info("store-dword-imm not supported on engine=%u\n",
+                               id);
                        continue;
                }
                engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
                engine = engines[order[i] % n];
                i = (i + 1) % (n * I915_NUM_ENGINES);
 
-               err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+               /*
+                * In order to utilize 64K pages we need to both pad the vma
+                * size and ensure the vma offset is at the start of the pt
+                * boundary; however, to improve coverage we opt for testing
+                * both aligned and unaligned offsets.
+                */
+               if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+                       offset_low = round_down(offset_low,
+                                               I915_GTT_PAGE_SIZE_2M);
+
+               err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+                                      dword, num + 1);
                if (err)
                        break;
 
-               err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+               err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+                                      dword, num + 1);
                if (err)
                        break;
 
                if (igt_timeout(end_time,
                                "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-                               __func__, engine->id, offset_low, offset_high, max_page_size))
+                               __func__, engine->id, offset_low, offset_high,
+                               max_page_size))
                        break;
        }
 
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
         * huge-gtt-pages.
         */
 
-       if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+       if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
                pr_info("48b PPGTT not supported, skipping\n");
                return 0;
        }
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
                SUBTEST(igt_mock_ppgtt_huge_fill),
                SUBTEST(igt_mock_ppgtt_64K),
        };
-       int saved_ppgtt = i915_modparams.enable_ppgtt;
        struct drm_i915_private *dev_priv;
-       struct pci_dev *pdev;
        struct i915_hw_ppgtt *ppgtt;
+       struct pci_dev *pdev;
        int err;
 
        dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
                return -ENOMEM;
 
        /* Pretend to be a device which supports the 48b PPGTT */
-       i915_modparams.enable_ppgtt = 3;
+       mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
 
        pdev = dev_priv->drm.pdev;
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
 
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
-
-       i915_modparams.enable_ppgtt = saved_ppgtt;
-
        drm_dev_put(&dev_priv->drm);
 
        return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
        struct i915_gem_context *ctx;
        int err;
 
-       if (!USES_PPGTT(dev_priv)) {
+       if (!HAS_PPGTT(dev_priv)) {
                pr_info("PPGTT not supported, skipping live-selftests\n");
                return 0;
        }
index 76df25aa90c92b9da8f8a91ec785a54d82b13a8c..7d82043aff1099c82a35fde7fe0bce2f4b454bcb 100644 (file)
@@ -39,7 +39,8 @@ struct live_test {
        const char *func;
        const char *name;
 
-       unsigned int reset_count;
+       unsigned int reset_global;
+       unsigned int reset_engine[I915_NUM_ENGINES];
 };
 
 static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
                           const char *func,
                           const char *name)
 {
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        int err;
 
        t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
        }
 
        i915->gpu_error.missed_irq_rings = 0;
-       t->reset_count = i915_reset_count(&i915->gpu_error);
+       t->reset_global = i915_reset_count(&i915->gpu_error);
+
+       for_each_engine(engine, i915, id)
+               t->reset_engine[id] =
+                       i915_reset_engine_count(&i915->gpu_error, engine);
 
        return 0;
 }
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
 static int end_live_test(struct live_test *t)
 {
        struct drm_i915_private *i915 = t->i915;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
 
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                return -EIO;
 
-       if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+       if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
                pr_err("%s(%s): GPU was reset %d times!\n",
                       t->func, t->name,
-                      i915_reset_count(&i915->gpu_error) - t->reset_count);
+                      i915_reset_count(&i915->gpu_error) - t->reset_global);
+               return -EIO;
+       }
+
+       for_each_engine(engine, i915, id) {
+               if (t->reset_engine[id] ==
+                   i915_reset_engine_count(&i915->gpu_error, engine))
+                       continue;
+
+               pr_err("%s(%s): engine '%s' was reset %d times!\n",
+                      t->func, t->name, engine->name,
+                      i915_reset_engine_count(&i915->gpu_error, engine) -
+                      t->reset_engine[id]);
                return -EIO;
        }
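begin_live_test()/end_live_test() now snapshot a per-engine reset counter array alongside the global count, then diff both afterwards, so an engine-only reset (which need not bump the device-level counter) still fails the test. A small sketch of that snapshot/compare shape, with hypothetical names and a fixed engine count:

#include <stdio.h>

#define NR_ENGINES 4

extern unsigned int engine_reset_count(int engine);	/* assumed counter read */

struct snapshot { unsigned int reset[NR_ENGINES]; };

static void snapshot_begin(struct snapshot *s)
{
	for (int i = 0; i < NR_ENGINES; i++)
		s->reset[i] = engine_reset_count(i);
}

static int snapshot_check(const struct snapshot *s)
{
	int err = 0;

	for (int i = 0; i < NR_ENGINES; i++) {
		unsigned int now = engine_reset_count(i);

		if (now != s->reset[i]) {
			fprintf(stderr, "engine %d reset %u times!\n",
				i, now - s->reset[i]);
			err = -1;
		}
	}
	return err;
}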
 
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj = NULL;
+       unsigned long ncontexts, ndwords, dw;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
-       unsigned long ncontexts, ndwords, dw;
-       bool first_shared_gtt = true;
+       struct live_test t;
        int err = -ENODEV;
 
        /*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
 
+       err = begin_live_test(&t, i915, __func__, "");
+       if (err)
+               goto out_unlock;
+
        ncontexts = 0;
        ndwords = 0;
        dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
                struct i915_gem_context *ctx;
                unsigned int id;
 
-               if (first_shared_gtt) {
-                       ctx = __create_hw_context(i915, file->driver_priv);
-                       first_shared_gtt = false;
-               } else {
-                       ctx = i915_gem_create_context(i915, file->driver_priv);
-               }
+               ctx = i915_gem_create_context(i915, file->driver_priv);
                if (IS_ERR(ctx)) {
                        err = PTR_ERR(ctx);
                        goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
        }
 
 out_unlock:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (end_live_test(&t))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj = NULL;
+       struct i915_gem_context *ctx;
+       struct i915_hw_ppgtt *ppgtt;
+       unsigned long ndwords, dw;
        struct drm_file *file;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
-       struct i915_gem_context *ctx;
-       struct i915_hw_ppgtt *ppgtt;
-       unsigned long ndwords, dw;
+       struct live_test t;
        int err = -ENODEV;
 
        /*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
 
+       err = begin_live_test(&t, i915, __func__, "");
+       if (err)
+               goto out_unlock;
+
        ctx = i915_gem_create_context(i915, file->driver_priv);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
        }
 
 out_unlock:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (end_live_test(&t))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       mock_file_free(i915, file);
+       return err;
+}
+
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
+       struct drm_mm_node *node =
+               __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+                                       offset, offset + sizeof(u32) - 1);
+       if (!node || node->start > offset)
+               return 0;
+
+       GEM_BUG_ON(offset >= node->start + node->size);
+
+       pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+              upper_32_bits(offset), lower_32_bits(offset));
+       return -EINVAL;
+}
+
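check_scratch() hands __drm_mm_interval_first() an inclusive last byte, offset + sizeof(u32) - 1, so the query covers every byte the test's dword would touch. A small plain-C sketch of the same closed-interval overlap test (names are hypothetical), just to make the off-by-one explicit:

#include <stdbool.h>
#include <stdint.h>

/*
 * Does a node occupying [start, start + size - 1] overlap the u32 at
 * 'offset'? Both intervals are closed, mirroring check_scratch() above.
 */
static bool node_overlaps_u32(uint64_t start, uint64_t size, uint64_t offset)
{
	uint64_t first = offset;				/* first byte of the u32 */
	uint64_t last = offset + sizeof(uint32_t) - 1;		/* inclusive last byte */

	return first <= start + size - 1 && last >= start;
}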
+static int write_to_scratch(struct i915_gem_context *ctx,
+                           struct intel_engine_cs *engine,
+                           u64 offset, u32 value)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_request *rq;
+       struct i915_vma *vma;
+       u32 *cmd;
+       int err;
+
+       GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto err;
+       }
+
+       *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+       if (INTEL_GEN(i915) >= 8) {
+               *cmd++ = lower_32_bits(offset);
+               *cmd++ = upper_32_bits(offset);
+       } else {
+               *cmd++ = 0;
+               *cmd++ = offset;
+       }
+       *cmd++ = value;
+       *cmd = MI_BATCH_BUFFER_END;
+       i915_gem_object_unpin_map(obj);
+
+       err = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (err)
+               goto err;
+
+       vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+       if (err)
+               goto err;
+
+       err = check_scratch(ctx, offset);
+       if (err)
+               goto err_unpin;
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_unpin;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       if (err)
+               goto err_request;
+
+       err = i915_vma_move_to_active(vma, rq, 0);
+       if (err)
+               goto skip_request;
+
+       i915_gem_object_set_active_reference(obj);
+       i915_vma_unpin(vma);
+       i915_vma_close(vma);
+
+       i915_request_add(rq);
+
+       return 0;
+
+skip_request:
+       i915_request_skip(rq, err);
+err_request:
+       i915_request_add(rq);
+err_unpin:
+       i915_vma_unpin(vma);
+err:
+       i915_gem_object_put(obj);
+       return err;
+}
+
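write_to_scratch() encodes the store address differently per generation: gen8+ MI_STORE_DWORD_IMM takes a 64-bit address as a low dword followed by a high dword, while older generations take a single 32-bit address (preceded here by a zero dword). The kernel's lower_32_bits()/upper_32_bits() helpers are plain shifts and casts; a minimal equivalent:

#include <stdint.h>

static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/*
 * For offset = 0x0000000123456000ULL:
 *   lo32(offset) == 0x23456000  (emitted first on gen8+)
 *   hi32(offset) == 0x00000001  (emitted second)
 */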
+static int read_from_scratch(struct i915_gem_context *ctx,
+                            struct intel_engine_cs *engine,
+                            u64 offset, u32 *value)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
+       const u32 result = 0x100;
+       struct i915_request *rq;
+       struct i915_vma *vma;
+       u32 *cmd;
+       int err;
+
+       GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto err;
+       }
+
+       memset(cmd, POISON_INUSE, PAGE_SIZE);
+       if (INTEL_GEN(i915) >= 8) {
+               *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+               *cmd++ = RCS_GPR0;
+               *cmd++ = lower_32_bits(offset);
+               *cmd++ = upper_32_bits(offset);
+               *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+               *cmd++ = RCS_GPR0;
+               *cmd++ = result;
+               *cmd++ = 0;
+       } else {
+               *cmd++ = MI_LOAD_REGISTER_MEM;
+               *cmd++ = RCS_GPR0;
+               *cmd++ = offset;
+               *cmd++ = MI_STORE_REGISTER_MEM;
+               *cmd++ = RCS_GPR0;
+               *cmd++ = result;
+       }
+       *cmd = MI_BATCH_BUFFER_END;
+       i915_gem_object_unpin_map(obj);
+
+       err = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (err)
+               goto err;
+
+       vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+       if (err)
+               goto err;
+
+       err = check_scratch(ctx, offset);
+       if (err)
+               goto err_unpin;
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_unpin;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       if (err)
+               goto err_request;
+
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
+
+       i915_vma_unpin(vma);
+       i915_vma_close(vma);
+
+       i915_request_add(rq);
+
+       err = i915_gem_object_set_to_cpu_domain(obj, false);
+       if (err)
+               goto err;
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto err;
+       }
+
+       *value = cmd[result / sizeof(*cmd)];
+       i915_gem_object_unpin_map(obj);
+       i915_gem_object_put(obj);
+
+       return 0;
+
+skip_request:
+       i915_request_skip(rq, err);
+err_request:
+       i915_request_add(rq);
+err_unpin:
+       i915_vma_unpin(vma);
+err:
+       i915_gem_object_put(obj);
+       return err;
+}
+
+static int igt_vm_isolation(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx_a, *ctx_b;
+       struct intel_engine_cs *engine;
+       struct drm_file *file;
+       I915_RND_STATE(prng);
+       unsigned long count;
+       struct live_test t;
+       unsigned int id;
+       u64 vm_total;
+       int err;
+
+       if (INTEL_GEN(i915) < 7)
+               return 0;
+
+       /*
+        * The simple goal here is that a write into one context is not
+        * observed in a second (separate page tables and scratch).
+        */
+
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       err = begin_live_test(&t, i915, __func__, "");
+       if (err)
+               goto out_unlock;
+
+       ctx_a = i915_gem_create_context(i915, file->driver_priv);
+       if (IS_ERR(ctx_a)) {
+               err = PTR_ERR(ctx_a);
+               goto out_unlock;
+       }
+
+       ctx_b = i915_gem_create_context(i915, file->driver_priv);
+       if (IS_ERR(ctx_b)) {
+               err = PTR_ERR(ctx_b);
+               goto out_unlock;
+       }
+
+       /* We can only test vm isolation if the vms are distinct */
+       if (ctx_a->ppgtt == ctx_b->ppgtt)
+               goto out_unlock;
+
+       vm_total = ctx_a->ppgtt->vm.total;
+       GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+       vm_total -= I915_GTT_PAGE_SIZE;
+
+       intel_runtime_pm_get(i915);
+
+       count = 0;
+       for_each_engine(engine, i915, id) {
+               IGT_TIMEOUT(end_time);
+               unsigned long this = 0;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               while (!__igt_timeout(end_time, NULL)) {
+                       u32 value = 0xc5c5c5c5;
+                       u64 offset;
+
+                       div64_u64_rem(i915_prandom_u64_state(&prng),
+                                     vm_total, &offset);
+                       offset &= -sizeof(u32);
+                       offset += I915_GTT_PAGE_SIZE;
+
+                       err = write_to_scratch(ctx_a, engine,
+                                              offset, 0xdeadbeef);
+                       if (err == 0)
+                               err = read_from_scratch(ctx_b, engine,
+                                                       offset, &value);
+                       if (err)
+                               goto out_rpm;
+
+                       if (value) {
+                               pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+                                      engine->name, value,
+                                      upper_32_bits(offset),
+                                      lower_32_bits(offset),
+                                      this);
+                               err = -EINVAL;
+                               goto out_rpm;
+                       }
+
+                       this++;
+               }
+               count += this;
+       }
+       pr_info("Checked %lu scratch offsets across %d engines\n",
+               count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+       intel_runtime_pm_put(i915);
+out_unlock:
+       if (end_live_test(&t))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);
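The random offset in igt_vm_isolation() is drawn uniformly below vm_total, aligned down to a u32 boundary with `offset &= -sizeof(u32)` (negating the size yields an all-ones mask with the low two bits clear; the `~sizeof(u32)` form would clear only bit 2), and then biased past page 0, where the batch itself is pinned. A tiny sketch of that align-down step, assuming the intent is 4-byte alignment:

#include <stdint.h>

/* Align down to sizeof(uint32_t); -4 as a u64 is 0xfffffffffffffffc. */
static uint64_t align_down_u32(uint64_t offset)
{
	return offset & -(uint64_t)sizeof(uint32_t);
}

/* e.g. align_down_u32(0x1007) == 0x1004 */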
 
@@ -865,33 +1207,6 @@ out_unlock:
        return err;
 }
 
-static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
-{
-       struct drm_i915_gem_object *obj;
-       int err;
-
-       err = i915_gem_init_aliasing_ppgtt(i915);
-       if (err)
-               return err;
-
-       list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
-               struct i915_vma *vma;
-
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
-               if (IS_ERR(vma))
-                       continue;
-
-               vma->flags &= ~I915_VMA_LOCAL_BIND;
-       }
-
-       return 0;
-}
-
-static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
-{
-       i915_gem_fini_aliasing_ppgtt(i915);
-}
-
 int i915_gem_context_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
                SUBTEST(live_nop_switch),
                SUBTEST(igt_ctx_exec),
                SUBTEST(igt_ctx_readonly),
+               SUBTEST(igt_vm_isolation),
        };
-       bool fake_alias = false;
-       int err;
 
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return 0;
 
-       /* Install a fake aliasing gtt for exercise */
-       if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
-               mutex_lock(&dev_priv->drm.struct_mutex);
-               err = fake_aliasing_ppgtt_enable(dev_priv);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-               if (err)
-                       return err;
-
-               GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
-               fake_alias = true;
-       }
-
-       err = i915_subtests(tests, dev_priv);
-
-       if (fake_alias) {
-               mutex_lock(&dev_priv->drm.struct_mutex);
-               fake_aliasing_ppgtt_disable(dev_priv);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-       }
-
-       return err;
+       return i915_subtests(tests, dev_priv);
 }
index 128ad1cf0647a0986d83625669062382fb55a7b0..4365979d82228fa83c275f8a0f43b0ca6d11df60 100644 (file)
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
         * where the GTT space of the request is separate from the GGTT
         * allocation required to build the request.
         */
-       if (!USES_FULL_PPGTT(i915))
+       if (!HAS_FULL_PPGTT(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
index 8e2e269db97e82917b299afbe680fc008b8c90a1..69fe86b30fbb79aaea1c8d8ece1edffe03c9d684 100644 (file)
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
 
        /* Allocate a ppgtt and try to fill the entire range */
 
-       if (!USES_PPGTT(dev_priv))
+       if (!HAS_PPGTT(dev_priv))
                return 0;
 
        ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
        IGT_TIMEOUT(end_time);
        int err;
 
-       if (!USES_FULL_PPGTT(dev_priv))
+       if (!HAS_FULL_PPGTT(dev_priv))
                return 0;
 
        file = mock_file(dev_priv);
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != offset ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               offset, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
index 0c0ab82b6228f84090d50806406350b820fa3ef9..32cba4cae31afad754a8e66df51bd01bdaff19cf 100644 (file)
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
         * Get rid of clients created during driver load because the test will
         * recreate them.
         */
+       guc_clients_disable(guc);
        guc_clients_destroy(guc);
        if (guc->execbuf_client || guc->preempt_client) {
                pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
                goto out;
        }
 
-       /* Now create the doorbells */
-       guc_clients_doorbell_init(guc);
+       /* Now enable the clients */
+       guc_clients_enable(guc);
 
        /* each client should now have received a doorbell */
        if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
         * Basic test - an attempt to reallocate a valid doorbell to the
         * client it is currently assigned should not cause a failure.
         */
-       err = guc_clients_doorbell_init(guc);
-       if (err)
-               goto out;
-
-       /*
-        * Negative test - a client with no doorbell (invalid db id).
-        * After destroying the doorbell, the db id is changed to
-        * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
-        * allocate a doorbell with an invalid id (db has to be reserved before
-        * allocation).
-        */
-       destroy_doorbell(guc->execbuf_client);
-       if (client_doorbell_in_sync(guc->execbuf_client)) {
-               pr_err("destroy db did not work\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       unreserve_doorbell(guc->execbuf_client);
-
-       __create_doorbell(guc->execbuf_client);
-       err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
-       if (err != -EIO) {
-               pr_err("unexpected (err = %d)", err);
-               goto out_db;
-       }
-
-       if (!available_dbs(guc, guc->execbuf_client->priority)) {
-               pr_err("doorbell not available when it should\n");
-               err = -EIO;
-               goto out_db;
-       }
-
-out_db:
-       /* clean after test */
-       __destroy_doorbell(guc->execbuf_client);
-       err = reserve_doorbell(guc->execbuf_client);
-       if (err) {
-               pr_err("failed to reserve back the doorbell back\n");
-       }
        err = create_doorbell(guc->execbuf_client);
-       if (err) {
-               pr_err("recreate doorbell failed\n");
-               goto out;
-       }
 
 out:
        /*
         * Leave clean state for other test, plus the driver always destroy the
         * clients during unload.
         */
-       destroy_doorbell(guc->execbuf_client);
-       if (guc->preempt_client)
-               destroy_doorbell(guc->preempt_client);
+       guc_clients_disable(guc);
        guc_clients_destroy(guc);
        guc_clients_create(guc);
-       guc_clients_doorbell_init(guc);
+       guc_clients_enable(guc);
 unlock:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
 
                db_id = clients[i]->doorbell_id;
 
-               err = create_doorbell(clients[i]);
+               err = __guc_client_enable(clients[i]);
                if (err) {
                        pr_err("[%d] Failed to create a doorbell\n", i);
                        goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
 out:
        for (i = 0; i < ATTEMPTS; i++)
                if (!IS_ERR_OR_NULL(clients[i])) {
-                       destroy_doorbell(clients[i]);
+                       __guc_client_disable(clients[i]);
                        guc_client_free(clients[i]);
                }
 unlock:
index db378226ac105e4df702bccfd795097cdd695180..defe671130abfb99cccb4209b230673b4f6fb929 100644 (file)
@@ -76,7 +76,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
        h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
        vaddr = i915_gem_object_pin_map(h->obj,
-                                       HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+                                       i915_coherent_map_type(i915));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
@@ -234,7 +234,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
                        return ERR_CAST(obj);
 
                vaddr = i915_gem_object_pin_map(obj,
-                                               HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+                                               i915_coherent_map_type(h->i915));
                if (IS_ERR(vaddr)) {
                        i915_gem_object_put(obj);
                        return ERR_CAST(vaddr);
@@ -1150,6 +1150,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
                tsk = NULL;
                goto out_reset;
        }
+       get_task_struct(tsk);
 
        wait_for_completion(&arg.completion);
 
@@ -1172,6 +1173,8 @@ out_reset:
                /* The reset, even indirectly, should take less than 10ms. */
                igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
                        err = kthread_stop(tsk);
+
+               put_task_struct(tsk);
        }
 
        mutex_lock(&i915->drm.struct_mutex);
index 1aea7a8f2224a325652448966e59f0974c9a0e10..94fc0e5c8766ae1410dfd82c8cfb21a46d76c61f 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "i915_random.h"
 
 #include "mock_context.h"
 
@@ -48,7 +49,7 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
-       mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+       mode = i915_coherent_map_type(i915);
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
@@ -291,12 +292,14 @@ static int live_preempt(void *arg)
        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
-       ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+       ctx_hi->sched.priority =
+               I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
-       ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+       ctx_lo->sched.priority =
+               I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
        for_each_engine(engine, i915, id) {
                struct i915_request *rq;
@@ -417,7 +420,7 @@ static int live_late_preempt(void *arg)
                        goto err_wedged;
                }
 
-               attr.priority = I915_PRIORITY_MAX;
+               attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
                engine->schedule(rq, &attr);
 
                if (!wait_for_spinner(&spin_hi, rq)) {
@@ -573,6 +576,261 @@ err_unlock:
        return err;
 }
 
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+       return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+       return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
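random_range() above returns a value in the half-open interval [min, max): it asks the PRNG for a bounded value below max - min and adds min back. i915_prandom_u32_max_state() bounds a 32-bit random value by taking the high half of a 32x32 multiply, which avoids the heavier bias of a plain modulo; a sketch of the same idea (rand32() is an assumed uniform 32-bit source):

#include <stdint.h>

extern uint32_t rand32(void);	/* assumed uniform 32-bit PRNG */

/* Near-uniform value in [0, bound) via multiply-shift. */
static uint32_t rand_below(uint32_t bound)
{
	return (uint32_t)(((uint64_t)rand32() * bound) >> 32);
}

/* Uniform value in [min, max), mirroring random_range() above. */
static int rand_range(int min, int max)
{
	return (int)rand_below((uint32_t)(max - min)) + min;
}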
+struct preempt_smoke {
+       struct drm_i915_private *i915;
+       struct i915_gem_context **contexts;
+       struct intel_engine_cs *engine;
+       struct drm_i915_gem_object *batch;
+       unsigned int ncontext;
+       struct rnd_state prng;
+       unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+       return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+                                                         &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+                       struct i915_gem_context *ctx, int prio,
+                       struct drm_i915_gem_object *batch)
+{
+       struct i915_request *rq;
+       struct i915_vma *vma = NULL;
+       int err = 0;
+
+       if (batch) {
+               vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+               if (IS_ERR(vma))
+                       return PTR_ERR(vma);
+
+               err = i915_vma_pin(vma, 0, 0, PIN_USER);
+               if (err)
+                       return err;
+       }
+
+       ctx->sched.priority = prio;
+
+       rq = i915_request_alloc(smoke->engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto unpin;
+       }
+
+       if (vma) {
+               err = rq->engine->emit_bb_start(rq,
+                                               vma->node.start,
+                                               PAGE_SIZE, 0);
+               if (!err)
+                       err = i915_vma_move_to_active(vma, rq, 0);
+       }
+
+       i915_request_add(rq);
+
+unpin:
+       if (vma)
+               i915_vma_unpin(vma);
+
+       return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+       struct preempt_smoke *smoke = arg;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+
+       count = 0;
+       do {
+               struct i915_gem_context *ctx = smoke_context(smoke);
+               int err;
+
+               mutex_lock(&smoke->i915->drm.struct_mutex);
+               err = smoke_submit(smoke,
+                                  ctx, count % I915_PRIORITY_MAX,
+                                  smoke->batch);
+               mutex_unlock(&smoke->i915->drm.struct_mutex);
+               if (err)
+                       return err;
+
+               count++;
+       } while (!__igt_timeout(end_time, NULL));
+
+       smoke->count = count;
+       return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+       struct task_struct *tsk[I915_NUM_ENGINES] = {};
+       struct preempt_smoke arg[I915_NUM_ENGINES];
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       unsigned long count;
+       int err = 0;
+
+       mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+       for_each_engine(engine, smoke->i915, id) {
+               arg[id] = *smoke;
+               arg[id].engine = engine;
+               if (!(flags & BATCH))
+                       arg[id].batch = NULL;
+               arg[id].count = 0;
+
+               tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+                                     "igt/smoke:%d", id);
+               if (IS_ERR(tsk[id])) {
+                       err = PTR_ERR(tsk[id]);
+                       break;
+               }
+               get_task_struct(tsk[id]);
+       }
+
+       count = 0;
+       for_each_engine(engine, smoke->i915, id) {
+               int status;
+
+               if (IS_ERR_OR_NULL(tsk[id]))
+                       continue;
+
+               status = kthread_stop(tsk[id]);
+               if (status && !err)
+                       err = status;
+
+               count += arg[id].count;
+
+               put_task_struct(tsk[id]);
+       }
+
+       mutex_lock(&smoke->i915->drm.struct_mutex);
+
+       pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+               count, flags,
+               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+       return err;
+}
+
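smoke_crescendo() fans out one worker per engine and joins them with kthread_stop(), aggregating the per-thread counts afterwards; each thread must receive its own argument slot, &arg[id] (fixed above), or every worker would alias arg[0], and the collected error must be returned rather than dropped. A minimal userspace sketch of the same fan-out/join shape using pthreads (names hypothetical; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

struct worker_arg {
	int id;
	unsigned long count;
};

static void *worker(void *data)
{
	struct worker_arg *arg = data;	/* per-thread slot: &args[i], never &args */

	arg->count = (unsigned long)arg->id * 1000;	/* stand-in for real work */
	return NULL;
}

int main(void)
{
	struct worker_arg args[NR_WORKERS];
	pthread_t tsk[NR_WORKERS];
	unsigned long total = 0;

	for (int i = 0; i < NR_WORKERS; i++) {
		args[i].id = i;
		args[i].count = 0;
		pthread_create(&tsk[i], NULL, worker, &args[i]);
	}

	for (int i = 0; i < NR_WORKERS; i++) {
		pthread_join(tsk[i], NULL);	/* join, then aggregate */
		total += args[i].count;
	}

	printf("total=%lu\n", total);
	return 0;
}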
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+       enum intel_engine_id id;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+
+       count = 0;
+       do {
+               for_each_engine(smoke->engine, smoke->i915, id) {
+                       struct i915_gem_context *ctx = smoke_context(smoke);
+                       int err;
+
+                       err = smoke_submit(smoke,
+                                          ctx, random_priority(&smoke->prng),
+                                          flags & BATCH ? smoke->batch : NULL);
+                       if (err)
+                               return err;
+
+                       count++;
+               }
+       } while (!__igt_timeout(end_time, NULL));
+
+       pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+               count, flags,
+               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+       return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+       struct preempt_smoke smoke = {
+               .i915 = arg,
+               .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+               .ncontext = 1024,
+       };
+       const unsigned int phase[] = { 0, BATCH };
+       int err = -ENOMEM;
+       u32 *cs;
+       int n;
+
+       if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+               return 0;
+
+       smoke.contexts = kmalloc_array(smoke.ncontext,
+                                      sizeof(*smoke.contexts),
+                                      GFP_KERNEL);
+       if (!smoke.contexts)
+               return -ENOMEM;
+
+       mutex_lock(&smoke.i915->drm.struct_mutex);
+       intel_runtime_pm_get(smoke.i915);
+
+       smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+       if (IS_ERR(smoke.batch)) {
+               err = PTR_ERR(smoke.batch);
+               goto err_unlock;
+       }
+
+       cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+       if (IS_ERR(cs)) {
+               err = PTR_ERR(cs);
+               goto err_batch;
+       }
+       for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+               cs[n] = MI_ARB_CHECK;
+       cs[n] = MI_BATCH_BUFFER_END;
+       i915_gem_object_unpin_map(smoke.batch);
+
+       err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+       if (err)
+               goto err_batch;
+
+       for (n = 0; n < smoke.ncontext; n++) {
+               smoke.contexts[n] = kernel_context(smoke.i915);
+               if (!smoke.contexts[n])
+                       goto err_ctx;
+       }
+
+       for (n = 0; n < ARRAY_SIZE(phase); n++) {
+               err = smoke_crescendo(&smoke, phase[n]);
+               if (err)
+                       goto err_ctx;
+
+               err = smoke_random(&smoke, phase[n]);
+               if (err)
+                       goto err_ctx;
+       }
+
+err_ctx:
+       if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+               err = -EIO;
+
+       for (n = 0; n < smoke.ncontext; n++) {
+               if (!smoke.contexts[n])
+                       break;
+               kernel_context_close(smoke.contexts[n]);
+       }
+
+err_batch:
+       i915_gem_object_put(smoke.batch);
+err_unlock:
+       intel_runtime_pm_put(smoke.i915);
+       mutex_unlock(&smoke.i915->drm.struct_mutex);
+       kfree(smoke.contexts);
+
+       return err;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
@@ -580,6 +838,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
                SUBTEST(live_preempt_hang),
+               SUBTEST(live_preempt_smoke),
        };
 
        if (!HAS_EXECLISTS(i915))
index 22a73da45ad58b9bfae36cd823c6a934c4262c49..d0c44c18db429cd064c55dd51439ef01087c5779 100644 (file)
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.submit_request = mock_submit_request;
 
        i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
-       lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+       i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
 
        intel_engine_init_breadcrumbs(&engine->base);
        engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
index 435a2c35ee8c4acd46d9f3fccd3c6dc700ad8b8d..361e962a7969044aba1c8464096edfa493b4d0c7 100644 (file)
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
        .transfer = intel_dsi_host_transfer,
 };
 
-static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
-                                                 enum port port)
-{
-       struct intel_dsi_host *host;
-       struct mipi_dsi_device *device;
-
-       host = kzalloc(sizeof(*host), GFP_KERNEL);
-       if (!host)
-               return NULL;
-
-       host->base.ops = &intel_dsi_host_ops;
-       host->intel_dsi = intel_dsi;
-       host->port = port;
-
-       /*
-        * We should call mipi_dsi_host_register(&host->base) here, but we don't
-        * have a host->dev, and we don't have OF stuff either. So just use the
-        * dsi framework as a library and hope for the best. Create the dsi
-        * devices by ourselves here too. Need to be careful though, because we
-        * don't initialize any of the driver model devices here.
-        */
-       device = kzalloc(sizeof(*device), GFP_KERNEL);
-       if (!device) {
-               kfree(host);
-               return NULL;
-       }
-
-       device->host = &host->base;
-       host->device = device;
-
-       return host;
-}
-
 /*
  * send a video mode command
  *
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
-{
-       return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
-}
-
-static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
-{
-       return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
        int ret;
 
        DRM_DEBUG_KMS("\n");
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
        if (fixed_mode) {
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
                              const struct intel_crtc_state *pipe_config);
 static void intel_dsi_unprepare(struct intel_encoder *encoder);
 
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
-       struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
-       /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
-       if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
-               return;
-
-       msleep(msec);
-}
-
 /*
  * Panel enable/disable sequences from the VBT spec.
  *
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
  * - wait t4                                           - wait t4
  */
 
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
 static void intel_dsi_pre_enable(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
@@ -894,17 +845,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
 }
 
-/*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
-                                const struct intel_crtc_state *pipe_config,
-                                const struct drm_connector_state *conn_state)
-{
-       DRM_DEBUG_KMS("\n");
-}
-
 /*
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
        }
 }
 
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
-                    struct drm_display_mode *mode)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
-       DRM_DEBUG_KMS("\n");
-
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
-
-       if (fixed_mode) {
-               if (mode->hdisplay > fixed_mode->hdisplay)
-                       return MODE_PANEL;
-               if (mode->vdisplay > fixed_mode->vdisplay)
-                       return MODE_PANEL;
-               if (fixed_mode->clock > max_dotclk)
-                       return MODE_CLOCK_HIGH;
-       }
-
-       return MODE_OK;
-}
-
 /* return txclkesc cycles in terms of divider and duration in us */
 static u16 txclkesc(u32 divider, unsigned int us)
 {
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
        }
 }
 
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       struct drm_display_mode *mode;
-
-       DRM_DEBUG_KMS("\n");
-
-       if (!intel_connector->panel.fixed_mode) {
-               DRM_DEBUG_KMS("no fixed mode\n");
-               return 0;
-       }
-
-       mode = drm_mode_duplicate(connector->dev,
-                                 intel_connector->panel.fixed_mode);
-       if (!mode) {
-               DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
-               return 0;
-       }
-
-       drm_mode_probed_add(connector, mode);
-       return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       DRM_DEBUG_KMS("\n");
-       intel_panel_fini(&intel_connector->panel);
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
 static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
-       .destroy = intel_dsi_connector_destroy,
+       .destroy = intel_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
 
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-       enum i9xx_plane_id i9xx_plane;
+       struct intel_encoder *encoder = connector->encoder;
+       enum intel_display_power_domain power_domain;
+       enum drm_panel_orientation orientation;
+       struct intel_plane *plane;
+       struct intel_crtc *crtc;
+       enum pipe pipe;
        u32 val;
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               if (connector->encoder->crtc_mask == BIT(PIPE_B))
-                       i9xx_plane = PLANE_B;
-               else
-                       i9xx_plane = PLANE_A;
+       if (!encoder->get_hw_state(encoder, &pipe))
+               return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
 
-               val = I915_READ(DSPCNTR(i9xx_plane));
-               if (val & DISPPLANE_ROTATE_180)
-                       orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-       }
+       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+       plane = to_intel_plane(crtc->base.primary);
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+       val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+       if (!(val & DISPLAY_PLANE_ENABLE))
+               orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+       else if (val & DISPPLANE_ROTATE_180)
+               orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+       else
+               orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+       intel_display_power_put(dev_priv, power_domain);
 
        return orientation;
 }
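vlv_dsi_get_hw_panel_orientation() brackets its register read with intel_display_power_get_if_enabled()/intel_display_power_put(), reporting "unknown" when the pipe's power well is down rather than touching a powered-off register. A hedged sketch of that conditional-reference shape (all names hypothetical):

#include <stdbool.h>
#include <stdint.h>

extern bool power_get_if_enabled(int domain);	/* assumed: takes a ref iff powered */
extern void power_put(int domain);
extern uint32_t read_reg(uint32_t reg);

static int read_state_if_powered(int domain, uint32_t reg, uint32_t *out)
{
	if (!power_get_if_enabled(domain))
		return -1;		/* hardware off: caller reports "unknown" */

	*out = read_reg(reg);		/* safe: we hold a power reference */
	power_put(domain);
	return 0;
}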
 
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       enum drm_panel_orientation orientation;
+
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               orientation = vlv_dsi_get_hw_panel_orientation(connector);
+               if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+                       return orientation;
+       }
+
+       return intel_dsi_get_panel_orientation(connector);
+}
+
 static void intel_dsi_add_properties(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
                connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
                connector->base.display_info.panel_orientation =
-                       intel_dsi_get_panel_orientation(connector);
+                       vlv_dsi_get_panel_orientation(connector);
                drm_connector_init_panel_orientation_property(
                                &connector->base,
                                connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
        intel_encoder->compute_config = intel_dsi_compute_config;
        intel_encoder->pre_enable = intel_dsi_pre_enable;
-       intel_encoder->enable = intel_dsi_enable_nop;
        intel_encoder->disable = intel_dsi_disable;
        intel_encoder->post_disable = intel_dsi_post_disable;
        intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
        for_each_dsi_port(port, intel_dsi->ports) {
                struct intel_dsi_host *host;
 
-               host = intel_dsi_host_init(intel_dsi, port);
+               host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+                                          port);
                if (!host)
                        goto err;
 
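Note: the reworked orientation probe above follows the usual i915 pattern for registers that may be powered down: take the pipe's power domain with intel_display_power_get_if_enabled(), read, drop the reference, and report "unknown" rather than touch dead hardware (with a fall back to the VBT-based lookup when the encoder is off). A minimal sketch of that guard, with read_hw_state() as a hypothetical stand-in for the DSPCNTR decode:

	/* Sketch only, not part of the patch: power-domain-guarded read.
	 * read_hw_state() is a hypothetical stand-in for the DSPCNTR
	 * decode above; -ENODEV plays the role of the patch's
	 * DRM_MODE_PANEL_ORIENTATION_UNKNOWN "can't tell" result.
	 */
	static int guarded_read(struct drm_i915_private *dev_priv, enum pipe pipe)
	{
		enum intel_display_power_domain domain = POWER_DOMAIN_PIPE(pipe);
		int val;

		if (!intel_display_power_get_if_enabled(dev_priv, domain))
			return -ENODEV; /* pipe is off, nothing to read */

		val = read_hw_state(dev_priv, pipe);

		intel_display_power_put(dev_priv, domain);
		return val;
	}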
index bcffe8ea642c5977711e65a91aa67fa1ec548070..e95e0e7a7fa1f3ae6a58be66e4ca0874e7b1ca99 100644
@@ -983,6 +983,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
        unsigned int sof_lines;
        unsigned int vsync_lines;
 
+       /* Use VENCI for 480i and 576i and double HDMI pixels */
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               hdmi_repeat = true;
+               use_enci = true;
+               venc_hdmi_latency = 1;
+       }
+
        if (meson_venc_hdmi_supported_vic(vic)) {
                vmode = meson_venc_hdmi_get_vic_vmode(vic);
                if (!vmode) {
@@ -994,13 +1001,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
        } else {
                meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
                vmode = &vmode_dmt;
-       }
-
-       /* Use VENCI for 480i and 576i and double HDMI pixels */
-       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
-               hdmi_repeat = true;
-               use_enci = true;
-               venc_hdmi_latency = 1;
+               use_enci = false;
        }
 
        /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
index 04f1dfba12e5993ead71b7ef1639ac208c0be938..0aaedc5548798ec32dd7133e3041ee8651cd3270 100644
@@ -212,8 +212,6 @@ struct mga_device {
        int fb_mtrr;
 
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
        } ttm;
 
index 05570f0de4d7118ed1c395a93b873451420be467..d96a9b32455e6f5814e7454f482e62068a557b1f 100644
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct mga_device, ttm.bdev);
 }
 
-static int
-mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void
-mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int mgag200_ttm_global_init(struct mga_device *ast)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       global_ref = &ast->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &mgag200_ttm_mem_global_init;
-       global_ref->release = &mgag200_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       ast->ttm.bo_global_ref.mem_glob =
-               ast->ttm.mem_global_ref.object;
-       global_ref = &ast->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&ast->ttm.mem_global_ref);
-               return r;
-       }
-       return 0;
-}
-
-static void
-mgag200_ttm_global_release(struct mga_device *ast)
-{
-       if (ast->ttm.mem_global_ref.release == NULL)
-               return;
-
-       drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&ast->ttm.mem_global_ref);
-       ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct mgag200_bo *bo;
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
        struct drm_device *dev = mdev->dev;
        struct ttm_bo_device *bdev = &mdev->ttm.bdev;
 
-       ret = mgag200_ttm_global_init(mdev);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&mdev->ttm.bdev,
-                                mdev->ttm.bo_global_ref.ref.object,
                                 &mgag200_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
 
        ttm_bo_device_release(&mdev->ttm.bdev);
 
-       mgag200_ttm_global_release(mdev);
-
        arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
                                pci_resource_len(dev->pdev, 0));
        arch_phys_wc_del(mdev->fb_mtrr);
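Note: this is the first of several identical cleanups in this pull (mgag200 here, then nouveau, qxl, radeon, virtio and vmwgfx below): the per-driver drm_global_reference/ttm_bo_global_ref boilerplate goes away because the TTM core now owns a single ttm_bo_glob/ttm_mem_glob pair (see the ttm_bo.c and ttm_memory.c hunks further down). Driver init shrinks to one call; a sketch, with drv_* names standing in for the driver's own:

	/* Sketch: driver TTM init after the global-ref removal. The
	 * drv_* identifiers are placeholders; ttm_bo_device_init() now
	 * takes and drops the shared global reference internally.
	 */
	static int drv_mm_init(struct drv_device *ddev, struct drm_device *dev)
	{
		return ttm_bo_device_init(&ddev->ttm.bdev,
					  &drv_bo_driver,
					  dev->anon_inode->i_mapping,
					  DRM_FILE_PAGE_OFFSET,
					  true /* need_dma32, as appropriate */);
	}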
index 0b2191fa96f7bc288e28e2cb6df141422882ef1a..d20b9ba4b1c1a60d6ab942708479f2fdb15ccaa5 100644
@@ -146,8 +146,6 @@ struct nouveau_drm {
 
        /* TTM interface support */
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
                atomic_t validate_sequence;
                int (*move)(struct nouveau_channel *,
index 8edb9f2a426945be9bf88ff167cc9ccebfa2171b..1543c2f8d3d3312f267e17c03168fab29370aa77 100644
@@ -174,66 +174,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
 
-static int
-nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void
-nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-int
-nouveau_ttm_global_init(struct nouveau_drm *drm)
-{
-       struct drm_global_reference *global_ref;
-       int ret;
-
-       global_ref = &drm->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &nouveau_ttm_mem_global_init;
-       global_ref->release = &nouveau_ttm_mem_global_release;
-
-       ret = drm_global_item_ref(global_ref);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed setting up TTM memory accounting\n");
-               drm->ttm.mem_global_ref.release = NULL;
-               return ret;
-       }
-
-       drm->ttm.bo_global_ref.mem_glob = global_ref->object;
-       global_ref = &drm->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-
-       ret = drm_global_item_ref(global_ref);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed setting up TTM BO subsystem\n");
-               drm_global_item_unref(&drm->ttm.mem_global_ref);
-               drm->ttm.mem_global_ref.release = NULL;
-               return ret;
-       }
-
-       return 0;
-}
-
-void
-nouveau_ttm_global_release(struct nouveau_drm *drm)
-{
-       if (drm->ttm.mem_global_ref.release == NULL)
-               return;
-
-       drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&drm->ttm.mem_global_ref);
-       drm->ttm.mem_global_ref.release = NULL;
-}
-
 static int
 nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                drm->agp.cma = pci->agp.cma;
        }
 
-       ret = nouveau_ttm_global_init(drm);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&drm->ttm.bdev,
-                                 drm->ttm.bo_global_ref.ref.object,
                                  &nouveau_bo_driver,
                                  dev->anon_inode->i_mapping,
                                  DRM_FILE_PAGE_OFFSET,
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
        ttm_bo_device_release(&drm->ttm.bdev);
 
-       nouveau_ttm_global_release(drm);
-
        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
index 394c129cfb3bb8e03b8970fc839656e3d25becd5..0a485c5b982eb84addaf013dd8241509ca403d04 100644
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
 
        /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
         * of data to 3 by default */
-       if (dsi->data->quirks & DSI_QUIRK_GNQ)
+       if (dsi->data->quirks & DSI_QUIRK_GNQ) {
+               dsi_runtime_get(dsi);
                /* NB_DATA_LANES */
                dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
-       else
+               dsi_runtime_put(dsi);
+       } else {
                dsi->num_lanes_supported = 3;
+       }
 
        r = dsi_init_output(dsi);
        if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
        }
 
        r = of_platform_populate(dev->of_node, NULL, NULL, dev);
-       if (r)
+       if (r) {
                DSSERR("Failed to populate DSI child devices: %d\n", r);
+               goto err_uninit_output;
+       }
 
        r = component_add(&pdev->dev, &dsi_component_ops);
        if (r)
-               goto err_uninit_output;
+               goto err_of_depopulate;
 
        return 0;
 
+err_of_depopulate:
+       of_platform_depopulate(dev);
 err_uninit_output:
        dsi_uninit_output(dsi);
 err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
        /* wait for current handler to finish before turning the DSI off */
        synchronize_irq(dsi->irq);
 
-       dispc_runtime_put(dsi->dss->dispc);
-
        return 0;
 }
 
 static int dsi_runtime_resume(struct device *dev)
 {
        struct dsi_data *dsi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(dsi->dss->dispc);
-       if (r)
-               return r;
 
        dsi->is_enabled = true;
        /* ensure the irq handler sees the is_enabled value */
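Note: the probe fix above is the standard reverse-order unwind: of_platform_populate() gained a matching of_platform_depopulate() label that runs before dsi_uninit_output(), so a component_add() failure no longer leaks the populated children. The idiom, reduced to a sketch with hypothetical step1()/step2() helpers:

	/* Sketch of the unwind idiom: labels run in reverse order of
	 * acquisition. step1/step2/undo_step1 are hypothetical helpers.
	 */
	static int drv_probe(struct platform_device *pdev)
	{
		int r;

		r = step1(pdev);        /* e.g. populate child devices */
		if (r)
			return r;

		r = step2(pdev);        /* e.g. add to a component master */
		if (r)
			goto err_undo_step1;

		return 0;

	err_undo_step1:
		undo_step1(pdev);       /* e.g. of_platform_depopulate() */
		return r;
	}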
index 1aaf260aa9b8638d2e7fac7aaa36ed3fe14a0880..7553c7fc1c457f23bb456046c17408ba89fc9d24 100644
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
                                                   dss);
 
        /* Add all the child devices as components. */
+       r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+       if (r)
+               goto err_uninit_debugfs;
+
        omapdss_gather_components(&pdev->dev);
 
        device_for_each_child(&pdev->dev, &match, dss_add_child_component);
 
        r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
        if (r)
-               goto err_uninit_debugfs;
+               goto err_of_depopulate;
 
        return 0;
 
+err_of_depopulate:
+       of_platform_depopulate(&pdev->dev);
+
 err_uninit_debugfs:
        dss_debugfs_remove_file(dss->debugfs.clk);
        dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
 {
        struct dss_device *dss = platform_get_drvdata(pdev);
 
+       of_platform_depopulate(&pdev->dev);
+
        component_master_del(&pdev->dev, &dss_component_ops);
 
        dss_debugfs_remove_file(dss->debugfs.clk);
index cf6230eac31a3cffb37f62cb3e7b79ec9a6bb552..aabdda394c9c6f4cf7f93eb0f8e0f9a6126262d1 100644
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
 
        hdmi->dss = dss;
 
-       r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+       r = hdmi_runtime_get(hdmi);
        if (r)
                return r;
 
+       r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+       if (r)
+               goto err_runtime_put;
+
        r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
        if (r)
                goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
        hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
                                               hdmi);
 
+       hdmi_runtime_put(hdmi);
+
        return 0;
 
 err_cec_uninit:
        hdmi4_cec_uninit(&hdmi->core);
 err_pll_uninit:
        hdmi_pll_uninit(&hdmi->pll);
+err_runtime_put:
+       hdmi_runtime_put(hdmi);
        return r;
 }
 
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-       dispc_runtime_put(hdmi->dss->dispc);
-
-       return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(hdmi->dss->dispc);
-       if (r < 0)
-               return r;
-
-       return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-       .runtime_suspend = hdmi_runtime_suspend,
-       .runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
        { .compatible = "ti,omap4-hdmi", },
        {},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
        .remove         = hdmi4_remove,
        .driver         = {
                .name   = "omapdss_hdmi",
-               .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
                .suppress_bind_attrs = true,
        },
index b0e4a7463f8c88517fcb398a049a8f5355df6dc9..9e8556f67a2914aed8ed1b71409956c2bcc07057 100644
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-       dispc_runtime_put(hdmi->dss->dispc);
-
-       return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(hdmi->dss->dispc);
-       if (r < 0)
-               return r;
-
-       return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-       .runtime_suspend = hdmi_runtime_suspend,
-       .runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
        { .compatible = "ti,omap5-hdmi", },
        { .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
        .remove         = hdmi5_remove,
        .driver         = {
                .name   = "omapdss_hdmi5",
-               .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
                .suppress_bind_attrs = true,
        },
index ff0b18c8e4acedc4d2e310d5377a789fecaaf9a2..b5f52727f8b17237f52bbad92e170a488e27b396 100644
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
        if (venc->tv_dac_clk)
                clk_disable_unprepare(venc->tv_dac_clk);
 
-       dispc_runtime_put(venc->dss->dispc);
-
        return 0;
 }
 
 static int venc_runtime_resume(struct device *dev)
 {
        struct venc_device *venc = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(venc->dss->dispc);
-       if (r < 0)
-               return r;
 
        if (venc->tv_dac_clk)
                clk_prepare_enable(venc->tv_dac_clk);
index 62928ec0e7db7a6fd6c53d1801c62a19dae80758..caffc547ef97e385cb913f77fd3ebe55a082d486 100644
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
 static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
 {
+       struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
        int ret;
 
        DBG("%s", omap_crtc->name);
 
+       priv->dispc_ops->runtime_get(priv->dispc);
+
        spin_lock_irq(&crtc->dev->event_lock);
        drm_crtc_vblank_on(crtc);
        ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
 {
+       struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
        DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
        spin_unlock_irq(&crtc->dev->event_lock);
 
        drm_crtc_vblank_off(crtc);
+
+       priv->dispc_ops->runtime_put(priv->dispc);
 }
 
 static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
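Note: together with the dsi/hdmi4/hdmi5/venc hunks earlier, this moves ownership of the dispc runtime-PM reference out of each encoder's dev_pm_ops and into the CRTC, so dispc is held exactly while a CRTC is enabled. The resulting bracketing, boiled down (omitting the vblank and event handling kept in the real callbacks):

	/* Sketch: CRTC-owned dispc runtime PM after this series. */
	static void crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
	{
		struct omap_drm_private *priv = crtc->dev->dev_private;

		priv->dispc_ops->runtime_get(priv->dispc);  /* pin dispc */
		/* ... drm_crtc_vblank_on(), event arming ... */
	}

	static void crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
	{
		struct omap_drm_private *priv = crtc->dev->dev_private;

		/* ... drm_crtc_vblank_off(), flush pending event ... */
		priv->dispc_ops->runtime_put(priv->dispc);  /* unpin */
	}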
index 14d3fa855708f544d459b64b23c611e18e7d01cf..13a0254b59a1a55fd09663dd32170b478b9513a3 100644
@@ -126,9 +126,6 @@ struct qxl_output {
 #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
 
 struct qxl_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
-       unsigned int mem_global_referenced:1;
        struct ttm_bo_device            bdev;
 };
 
index 559a101138379192fa2310bb178ba5643162d9f0..886f61e94f24470c37feaf224974fb28d43d2e5b 100644
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
        return qdev;
 }
 
-static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int qxl_ttm_global_init(struct qxl_device *qdev)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       qdev->mman.mem_global_referenced = false;
-       global_ref = &qdev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &qxl_ttm_mem_global_init;
-       global_ref->release = &qxl_ttm_mem_global_release;
-
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       qdev->mman.bo_global_ref.mem_glob =
-               qdev->mman.mem_global_ref.object;
-       global_ref = &qdev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&qdev->mman.mem_global_ref);
-               return r;
-       }
-
-       qdev->mman.mem_global_referenced = true;
-       return 0;
-}
-
-static void qxl_ttm_global_fini(struct qxl_device *qdev)
-{
-       if (qdev->mman.mem_global_referenced) {
-               drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&qdev->mman.mem_global_ref);
-               qdev->mman.mem_global_referenced = false;
-       }
-}
-
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
@@ -372,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
        int r;
        int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
-       r = qxl_ttm_global_init(qdev);
-       if (r)
-               return r;
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&qdev->mman.bdev,
-                              qdev->mman.bo_global_ref.ref.object,
                               &qxl_bo_driver,
                               qdev->ddev.anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
@@ -413,7 +353,6 @@ void qxl_ttm_fini(struct qxl_device *qdev)
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
        ttm_bo_device_release(&qdev->mman.bdev);
-       qxl_ttm_global_fini(qdev);
        DRM_INFO("qxl: ttm finalized\n");
 }
 
index 21161aa8acbf202d196a0ed7949a4659f4cf8269..652126fd6dd4f4ea5c0899f43a5047b0758db555 100644
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                          ((idx_value >> 21) & 0xF));
                                return -EINVAL;
                        }
-                       /* Pass through. */
+                       /* Fall through. */
                case 6:
                        track->cb[i].cpp = 4;
                        break;
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                return -EINVAL;
                        }
                        /* The same rules apply as for DXT3/5. */
-                       /* Pass through. */
+                       /* Fall through. */
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 1;
index 45e1d4e60759f55bbe4bb03faf629a442d7417f8..2318d9e3ed96ddc3c00395149bce5fe5c73af19c 100644
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev)
        default:
                /* force to 1 pipe */
                num_pipes = 1;
+               /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
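Note: the "Pass through." to "Fall through." rewrites in the two radeon hunks above are not cosmetic; GCC's -Wimplicit-fallthrough only silences the warning for a small set of comment spellings ("fall through", "fallthrough", ...) placed directly before the next case label, and "Pass through" is not one of them. The accepted shape:

	/* Sketch: comment placement that -Wimplicit-fallthrough accepts. */
	switch (num_pipes) {
	default:
		num_pipes = 1;  /* force to 1 pipe */
		/* fall through */
	case 1:
		tmp = (0 << 1);
		break;
	}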
index 1a6f6edb3515188ea55e3ce8e2b9d2e951d0c8c7..32808e50be12f815c85178083cc49bf24d16cf89 100644
@@ -448,10 +448,7 @@ struct radeon_surface_reg {
  * TTM.
  */
 struct radeon_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
        struct ttm_bo_device            bdev;
-       bool                            mem_global_referenced;
        bool                            initialized;
 
 #if defined(CONFIG_DEBUG_FS)
index 4278272e3191d3508aa88eede6751b75daefe388..3dae2c4dec711417d1537d603b3b1dd4d95cb03f 100644
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode
 
 static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
 {
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-       struct radeon_crtc *radeon_crtc;
        int restart;
        unsigned int h_total, v_total, f_total;
        int v_offset, h_offset;
        u16 p1, p2, h_inc;
        bool h_changed;
        const struct radeon_tv_mode_constants *const_ptr;
-       struct radeon_pll *pll;
-
-       radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
-       if (radeon_crtc->crtc_id == 1)
-               pll = &rdev->clock.p2pll;
-       else
-               pll = &rdev->clock.p1pll;
 
        const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
        if (!const_ptr)
index 92f6d4002eea4274a2d87c75f0322372864ba5cf..833e909706a9c2ef6845abd7358d495acc728603 100644
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 void radeon_bo_unref(struct radeon_bo **bo)
 {
        struct ttm_buffer_object *tbo;
-       struct radeon_device *rdev;
 
        if ((*bo) == NULL)
                return;
-       rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        ttm_bo_put(tbo);
        *bo = NULL;
index cbb67e9ffb3a52f413564f82db87dd0ece44c37e..9920a6fc11bf3446f1e1858414d8c25fc9cc2f07 100644
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
        return rdev;
 }
 
-
-/*
- * Global memory.
- */
-static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int radeon_ttm_global_init(struct radeon_device *rdev)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       rdev->mman.mem_global_referenced = false;
-       global_ref = &rdev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &radeon_ttm_mem_global_init;
-       global_ref->release = &radeon_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       rdev->mman.bo_global_ref.mem_glob =
-               rdev->mman.mem_global_ref.object;
-       global_ref = &rdev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&rdev->mman.mem_global_ref);
-               return r;
-       }
-
-       rdev->mman.mem_global_referenced = true;
-       return 0;
-}
-
-static void radeon_ttm_global_fini(struct radeon_device *rdev)
-{
-       if (rdev->mman.mem_global_referenced) {
-               drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&rdev->mman.mem_global_ref);
-               rdev->mman.mem_global_referenced = false;
-       }
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
        return 0;
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_ttm_global_init(rdev);
-       if (r) {
-               return r;
-       }
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
-                              rdev->mman.bo_global_ref.ref.object,
                               &radeon_bo_driver,
                               rdev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET,
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
-       radeon_ttm_global_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
 }
index 17741843cf519be7769047c24db32732551f7d24..90dacab67be5a8629525ed30665d58b93eeb2878 100644
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
                 * system clock, and have no internal clock divider.
                 */
 
-               if (WARN_ON(!rcrtc->extclock))
-                       return;
-
                /*
                 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
                 * We can work around them by configuring the DPLL to twice the
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
         * CRTC will be put later in .atomic_disable().
         *
         * If a mode set is not in progress the CRTC is enabled, and the
-        * following get call will be a no-op. There is thus no need to belance
+        * following get call will be a no-op. There is thus no need to balance
         * it in .atomic_flush() either.
         */
        rcar_du_crtc_get(rcrtc);
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
        struct rcar_du_device *rcdu = rcrtc->group->dev;
        bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+       unsigned int vbp;
 
        if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
                return MODE_NO_INTERLACE;
 
+       /*
+        * The hardware requires a minimum combined horizontal sync and back
+        * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+        */
+       if (mode->htotal - mode->hsync_start < 20)
+               return MODE_HBLANK_NARROW;
+
+       vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
+       if (vbp < 3)
+               return MODE_VBLANK_NARROW;
+
        return MODE_OK;
 }
 
@@ -1002,7 +1011,7 @@ unlock:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
-       return 0;
+       return ret;
 }
 
 static const struct drm_crtc_funcs crtc_funcs_gen2 = {
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
        clk = devm_clk_get(rcdu->dev, clk_name);
        if (!IS_ERR(clk)) {
                rcrtc->extclock = clk;
-       } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
-               dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
+       } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
                return -EPROBE_DEFER;
+       } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
+               /*
+                * DU channels that have a display PLL can't use the internal
+                * system clock and thus require an external clock.
+                */
+               ret = PTR_ERR(clk);
+               dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
+               return ret;
        }
 
        init_waitqueue_head(&rcrtc->flip_wait);
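Note: the new mode_valid() checks read straight off the mode timings: htotal - hsync_start is the horizontal sync width plus back porch, and vtotal - vsync_end is the vertical back porch, halved for interlaced modes because mode timings count frame lines. As a worked example with hypothetical numbers, a mode with hsync_start = 1048 and htotal = 1056 has only 8 pixels of sync plus back porch and would now be rejected as MODE_HBLANK_NARROW. The arithmetic as a standalone predicate:

	/* Sketch: the blanking minimums enforced by the hunk above. */
	static bool du_timings_ok(const struct drm_display_mode *mode,
				  bool interlaced)
	{
		unsigned int hsbp = mode->htotal - mode->hsync_start;
		unsigned int vbp = (mode->vtotal - mode->vsync_end) /
				   (interlaced ? 2 : 1);

		return hsbp >= 20 && vbp >= 3;
	}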
index c6770043dcdcaea664b993497d07c0d8613d0fbf..94f055186b95e2d04c15707955ea029b82a013a8 100644
@@ -41,7 +41,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
        .channels_mask = BIT(1) | BIT(0),
        .routes = {
                /*
-                * R8A7743 has one RGB output and one LVDS output
+                * R8A774[34] has one RGB output and one LVDS output
                 */
                [RCAR_DU_OUTPUT_DPAD0] = {
                        .possible_crtcs = BIT(1) | BIT(0),
@@ -77,6 +77,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
        },
 };
 
+static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
+       .gen = 2,
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
+                 | RCAR_DU_FEATURE_INTERLACED
+                 | RCAR_DU_FEATURE_TVM_SYNC,
+       .channels_mask = BIT(1) | BIT(0),
+       .routes = {
+               /*
+                * R8A77470 has two RGB outputs, one LVDS output, and
+                * one (currently unsupported) analog video output
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(0),
+                       .port = 0,
+               },
+               [RCAR_DU_OUTPUT_DPAD1] = {
+                       .possible_crtcs = BIT(1),
+                       .port = 1,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0) | BIT(1),
+                       .port = 2,
+               },
+       },
+};
+
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_INTERLACED
@@ -341,7 +368,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
 
 static const struct of_device_id rcar_du_of_table[] = {
        { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+       { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
        { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
+       { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
        { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
        { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
        { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
index 4ebd61ecbee177ab928d7837b28469d049f3d491..fe6f65c94eefed9e9e3d594db631439524248d77 100644
@@ -582,7 +582,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
         * Initialize vertical blanking interrupts handling. Start with vblank
         * disabled for all CRTCs.
         */
-       ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+       ret = drm_vblank_init(dev, rcdu->num_crtcs);
        if (ret < 0)
                return ret;
 
index 9e07758a755c254ee8fc200037196237ee841948..39d5ae3fdf72b1de5ffa079751b378a080a2087d 100644
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
                drm_plane_helper_add(&plane->plane,
                                     &rcar_du_plane_helper_funcs);
 
+               drm_plane_create_alpha_property(&plane->plane);
+
                if (type == DRM_PLANE_TYPE_PRIMARY)
                        continue;
 
                drm_object_attach_property(&plane->plane.base,
                                           rcdu->props.colorkey,
                                           RCAR_DU_COLORKEY_NONE);
-               drm_plane_create_alpha_property(&plane->plane);
                drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
        }
 
index 173d7ad0b991b49257882d1f8ceacec4d6c07cc8..534a128a869d51e438ed5e5c7172ec306ef5a025 100644
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
        { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+       { .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
        { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
index 3e22a54a99c25b52c62ece421596695faf3d3489..4463d3826ecbec0432ee363087c8e6d1f86755cc 100644
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
        int i;
 
        for (i = 0; i < entity->num_rq_list; ++i) {
-               num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+               struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+               if (!entity->rq_list[i]->sched->ready) {
+                       DRM_WARN("sched%s is not ready, skipping", sched->name);
+                       continue;
+               }
+
+               num_jobs = atomic_read(&sched->num_jobs);
                if (num_jobs < min_jobs) {
                        min_jobs = num_jobs;
                        rq = entity->rq_list[i];
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
        drm_sched_fence_finished(job->s_fence);
        WARN_ON(job->s_fence->parent);
-       dma_fence_put(&job->s_fence->finished);
        job->sched->ops->free_job(job);
 }
 
index 44fe587aaef97d30d4bb744fef25c851eacdcf82..18ebbb05762e99c0706aeba369c8d3d0baeafda7 100644
@@ -196,6 +196,19 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
                schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+       mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -220,7 +233,6 @@ static void drm_sched_job_finish(struct work_struct *work)
        drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
 
-       dma_fence_put(&s_job->s_fence->finished);
        sched->ops->free_job(s_job);
 }
 
@@ -283,6 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 already_signaled:
                ;
        }
+       drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
 }
 
@@ -406,6 +419,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
        struct drm_gpu_scheduler *sched;
 
        drm_sched_entity_select_rq(entity);
+       if (!entity->rq)
+               return -ENOENT;
+
        sched = entity->rq->sched;
 
        job->sched = sched;
@@ -423,6 +439,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
 }
 EXPORT_SYMBOL(drm_sched_job_init);
 
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+       dma_fence_put(&job->s_fence->finished);
+       job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
 /**
  * drm_sched_ready - is the scheduler ready
  *
@@ -619,6 +647,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                return PTR_ERR(sched->thread);
        }
 
+       sched->ready = true;
        return 0;
 }
 EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +663,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
        if (sched->thread)
                kthread_stop(sched->thread);
+
+       sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
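Note: three related scheduler pieces land here: a sched->ready flag (set by drm_sched_init(), cleared by drm_sched_fini(), checked when an entity picks a run queue), drm_sched_fault() so a driver can fire the timeout handler the moment it sees a hardware fault, and drm_sched_job_cleanup(), which moves the finished-fence put out of the core and into the driver's free_job path (v3d converts below). A driver free_job now looks roughly like this sketch, with my_job as a hypothetical driver type:

	/* Sketch: driver-side free_job once drm_sched_job_cleanup() exists. */
	static void my_free_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = container_of(sched_job, struct my_job, base);

		drm_sched_job_cleanup(sched_job);  /* puts s_fence->finished */
		my_job_put(job);                   /* driver-private teardown */
	}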
index 83b4657ffb107aff31997fca9c2fb51836b1087c..d87935bf8e308f81e13ccde697fe35e86fa446c6 100644
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+struct ttm_bo_global ttm_bo_glob = {
+       .use_count = 0
+};
+
 static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
                container_of(kobj, struct ttm_bo_global, kobj);
 
        __free_page(glob->dummy_read_page);
-       kfree(glob);
 }
 
-void ttm_bo_global_release(struct drm_global_reference *ref)
+static void ttm_bo_global_release(void)
 {
-       struct ttm_bo_global *glob = ref->object;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+
+       mutex_lock(&ttm_global_mutex);
+       if (--glob->use_count > 0)
+               goto out;
 
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
+       ttm_mem_global_release(&ttm_mem_glob);
+out:
+       mutex_unlock(&ttm_global_mutex);
 }
-EXPORT_SYMBOL(ttm_bo_global_release);
 
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static int ttm_bo_global_init(void)
 {
-       struct ttm_bo_global_ref *bo_ref =
-               container_of(ref, struct ttm_bo_global_ref, ref);
-       struct ttm_bo_global *glob = ref->object;
-       int ret;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+       int ret = 0;
        unsigned i;
 
-       mutex_init(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
+       if (++glob->use_count > 1)
+               goto out;
+
+       ret = ttm_mem_global_init(&ttm_mem_glob);
+       if (ret)
+               goto out;
+
        spin_lock_init(&glob->lru_lock);
-       glob->mem_glob = bo_ref->mem_glob;
+       glob->mem_glob = &ttm_mem_glob;
        glob->mem_glob->bo_glob = glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
-               goto out_no_drp;
+               goto out;
        }
 
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
-       return ret;
-out_no_drp:
-       kfree(glob);
+out:
+       mutex_unlock(&ttm_global_mutex);
        return ret;
 }
-EXPORT_SYMBOL(ttm_bo_global_init);
-
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                }
        }
 
-       mutex_lock(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
        list_del(&bdev->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       mutex_unlock(&ttm_global_mutex);
 
        cancel_delayed_work_sync(&bdev->wq);
 
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
        drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
+       if (!ret)
+               ttm_bo_global_release();
+
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-                      struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
                       uint64_t file_page_offset,
                       bool need_dma32)
 {
-       int ret = -EINVAL;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+       int ret;
+
+       ret = ttm_bo_global_init();
+       if (ret)
+               return ret;
 
        bdev->driver = driver;
 
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        bdev->dev_mapping = mapping;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
-       mutex_lock(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       mutex_unlock(&ttm_global_mutex);
 
        return 0;
 out_no_sys:
+       ttm_bo_global_release();
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
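Note: the replacement for drm_global is a statically allocated singleton pair (ttm_bo_glob here, ttm_mem_glob in ttm_memory.c below) guarded by ttm_global_mutex and a use_count: the first ttm_bo_device_init() brings the globals up, the last ttm_bo_device_release() tears them down. The core idiom as a sketch, with an explicit undo on failure, which the hunk expresses slightly differently:

	/* Sketch: first-user init / last-user release under one mutex.
	 * do_global_init()/do_global_release() are hypothetical helpers.
	 */
	static int global_get(void)
	{
		int ret = 0;

		mutex_lock(&ttm_global_mutex);
		if (++ttm_bo_glob.use_count == 1) {
			ret = do_global_init();
			if (ret)
				--ttm_bo_glob.use_count;
		}
		mutex_unlock(&ttm_global_mutex);
		return ret;
	}

	static void global_put(void)
	{
		mutex_lock(&ttm_global_mutex);
		if (--ttm_bo_glob.use_count == 0)
			do_global_release();
		mutex_unlock(&ttm_global_mutex);
	}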
index e493edb0d3e71299de52b4fceec4f6ac59afe9c2..efa005a1c1b79d3e7faa36f23a09419c95cf53a8 100644
@@ -187,14 +187,12 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
-       struct ttm_bo_driver *driver;
 
        if (list_empty(list))
                return;
 
        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
-       driver = bdev->driver;
        glob = bo->bdev->glob;
 
        spin_lock(&glob->lru_lock);
index 450387c92b63510df8b9e8ff340b511472b69462..f1567c353b543a3376c6b64ab5fb6c4550f91b23 100644
@@ -41,6 +41,9 @@
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
+struct ttm_mem_global ttm_mem_glob;
+EXPORT_SYMBOL(ttm_mem_glob);
+
 struct ttm_mem_zone {
        struct kobject kobj;
        struct ttm_mem_global *glob;
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
        return size;
 }
 
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
-       struct ttm_mem_global *glob =
-               container_of(kobj, struct ttm_mem_global, kobj);
-
-       kfree(glob);
-}
-
 static struct attribute *ttm_mem_global_attrs[] = {
        &ttm_mem_global_lower_mem_limit,
        NULL
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
 };
 
 static struct kobj_type ttm_mem_glob_kobj_type = {
-       .release = &ttm_mem_global_kobj_release,
        .sysfs_ops = &ttm_mem_global_ops,
        .default_attrs = ttm_mem_global_attrs,
 };
@@ -464,7 +458,6 @@ out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
 }
-EXPORT_SYMBOL(ttm_mem_global_init);
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
 }
-EXPORT_SYMBOL(ttm_mem_global_release);
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
index e1f2aab0717bd4182d5cf61a073ab3b7f4fc6826..c66d0ce21435e4bdc8bec1a4a867688b5f204466 100644
@@ -35,6 +35,8 @@ v3d_job_free(struct drm_sched_job *sched_job)
 {
        struct v3d_job *job = to_v3d_job(sched_job);
 
+       drm_sched_job_cleanup(sched_job);
+
        v3d_exec_put(job->exec);
 }
 
@@ -167,9 +169,6 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
        if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
                job->timedout_ctca = ctca;
                job->timedout_ctra = ctra;
-
-               schedule_delayed_work(&job->base.sched->work_tdr,
-                                     job->base.sched->timeout);
                return;
        }
 
index 7bec6e36886b09d58fcf06590b393524273c504f..f7e877857c1fc7083e1d8e8e6c94d6e4cdf34191 100644
@@ -145,9 +145,6 @@ struct virtio_gpu_fbdev {
 };
 
 struct virtio_gpu_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
-       bool                            mem_global_referenced;
        struct ttm_bo_device            bdev;
 };
 
index cd63dffa6d40be8cc5826bf0b2e5556c892b1088..4bfbf25fabff8091cab1518f5fc4302a0e4dfd56 100644
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
        return vgdev;
 }
 
-static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
-{
-       struct drm_global_reference *global_ref;
-       int r;
-
-       vgdev->mman.mem_global_referenced = false;
-       global_ref = &vgdev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &virtio_gpu_ttm_mem_global_init;
-       global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-
-       vgdev->mman.bo_global_ref.mem_glob =
-               vgdev->mman.mem_global_ref.object;
-       global_ref = &vgdev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&vgdev->mman.mem_global_ref);
-               return r;
-       }
-
-       vgdev->mman.mem_global_referenced = true;
-       return 0;
-}
-
-static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
-{
-       if (vgdev->mman.mem_global_referenced) {
-               drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&vgdev->mman.mem_global_ref);
-               vgdev->mman.mem_global_referenced = false;
-       }
-}
-
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *file_priv;
@@ -382,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 {
        int r;
 
-       r = virtio_gpu_ttm_global_init(vgdev);
-       if (r)
-               return r;
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&vgdev->mman.bdev,
-                              vgdev->mman.bo_global_ref.ref.object,
                               &virtio_gpu_bo_driver,
                               vgdev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
@@ -406,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 err_mm_init:
        ttm_bo_device_release(&vgdev->mman.bdev);
 err_dev_init:
-       virtio_gpu_ttm_global_fini(vgdev);
        return r;
 }
 
 void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
 {
        ttm_bo_device_release(&vgdev->mman.bdev);
-       virtio_gpu_ttm_global_fini(vgdev);
        DRM_INFO("virtio_gpu: ttm finalized\n");
 }
index 61a84b958d671671cb56c37b6e44a589989d026b..b9c078860a7c20d677dbba5a0d29f8c258b8127f 100644
@@ -801,11 +801,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
-       ret = vmw_ttm_global_init(dev_priv);
-       if (unlikely(ret != 0))
-               goto out_err0;
-
-
        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
@@ -816,7 +811,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
-               goto out_err3;
+               goto out_err0;
        }
 
        /* Need mmio memory to check for fifo pitchlock cap. */
@@ -828,8 +823,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_err4;
        }
 
-       dev_priv->tdev = ttm_object_device_init
-               (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+       dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+                                               &vmw_prime_dmabuf_ops);
 
        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -870,7 +865,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        ret = ttm_bo_device_init(&dev_priv->bdev,
-                                dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 VMWGFX_FILE_PAGE_OFFSET,
@@ -992,8 +986,6 @@ out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
        memunmap(dev_priv->mmio_virt);
-out_err3:
-       vmw_ttm_global_release(dev_priv);
 out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
@@ -1045,7 +1037,6 @@ static void vmw_driver_unload(struct drm_device *dev)
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-       vmw_ttm_global_release(dev_priv);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
index 59f614225bcd72b8a84a5de42813359e9a2760d0..28df788da44e30bb70015e151510435ab8c145e0 100644
@@ -417,8 +417,6 @@ enum {
 
 struct vmw_private {
        struct ttm_bo_device bdev;
-       struct ttm_bo_global_ref bo_global_ref;
-       struct drm_global_reference mem_global_ref;
 
        struct vmw_fifo_state fifo;
 
@@ -842,8 +840,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
  * TTM glue - vmwgfx_ttm_glue.c
  */
 
-extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
-extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
@@ -1363,7 +1359,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
 
 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
 {
-       return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+       return &ttm_mem_glob;
 }
 
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
index 7b1e5a5cbd2c7758aadf70f784f4ce9c629ca1db..154eb09aa91eb3ec96c74ede4b9b3ecd3117bfa1 100644
@@ -42,57 +42,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
        dev_priv = vmw_priv(file_priv->minor->dev);
        return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
 }
-
-static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       DRM_INFO("global init.\n");
-       return ttm_mem_global_init(ref->object);
-}
-
-static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-int vmw_ttm_global_init(struct vmw_private *dev_priv)
-{
-       struct drm_global_reference *global_ref;
-       int ret;
-
-       global_ref = &dev_priv->mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &vmw_ttm_mem_global_init;
-       global_ref->release = &vmw_ttm_mem_global_release;
-
-       ret = drm_global_item_ref(global_ref);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed setting up TTM memory accounting.\n");
-               return ret;
-       }
-
-       dev_priv->bo_global_ref.mem_glob =
-               dev_priv->mem_global_ref.object;
-       global_ref = &dev_priv->bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       ret = drm_global_item_ref(global_ref);
-
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed setting up TTM buffer objects.\n");
-               goto out_no_bo;
-       }
-
-       return 0;
-out_no_bo:
-       drm_global_item_unref(&dev_priv->mem_global_ref);
-       return ret;
-}
-
-void vmw_ttm_global_release(struct vmw_private *dev_priv)
-{
-       drm_global_item_unref(&dev_priv->bo_global_ref.ref);
-       drm_global_item_unref(&dev_priv->mem_global_ref);
-}
index cf2a18571d484d078dc1eabc59a3d6ff0f11ab07..a132c37d733490fa70af2674237162d73459a31d 100644
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
                        mutex_unlock(&vgasr_mutex);
                        return -EINVAL;
                }
+               /* notify if GPU has been already bound */
+               if (ops->gpu_bound)
+                       ops->gpu_bound(pdev, id);
        }
        mutex_unlock(&vgasr_mutex);
 
index aec253b44156891bf71c192c1cd1e41b2e5801a3..3cd7229b6e5465b88759d42a9a12084cd948eeab 100644
@@ -660,6 +660,20 @@ exit:
        return ret;
 }
 
+static int alps_sp_open(struct input_dev *dev)
+{
+       struct hid_device *hid = input_get_drvdata(dev);
+
+       return hid_hw_open(hid);
+}
+
+static void alps_sp_close(struct input_dev *dev)
+{
+       struct hid_device *hid = input_get_drvdata(dev);
+
+       hid_hw_close(hid);
+}
+
 static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
        struct alps_dev *data = hid_get_drvdata(hdev);
@@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
                input2->id.version = input->id.version;
                input2->dev.parent = input->dev.parent;
 
+               input_set_drvdata(input2, hdev);
+               input2->open = alps_sp_open;
+               input2->close = alps_sp_close;
+
                __set_bit(EV_KEY, input2->evbit);
                data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
                for (i = 0; i < data->sp_btn_cnt; i++)
index dc6d6477e9611eb2ba93a8d687bfaf9d4e634ef8..a1fa2fc8c9b57fd8e3de462d35b6247bd0d3e6e3 100644 (file)
@@ -359,6 +359,9 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
        u32 value;
        int ret;
 
+       if (!IS_ENABLED(CONFIG_ASUS_WMI))
+               return false;
+
        ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
                                       ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
        hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
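
The new IS_ENABLED() guard is the standard way to keep a cross-driver call
from being reached when its provider may be compiled out. A minimal sketch of
the idiom, with CONFIG_FOO and foo_query() as illustrative stand-ins:

#include <linux/kconfig.h>

bool foo_query(void);	/* provided only when CONFIG_FOO is enabled */

static bool feature_present(void)
{
	/* IS_ENABLED() folds to a compile-time 0 or 1 (true for =y or =m),
	 * so the call below is eliminated when CONFIG_FOO is off and
	 * leaves no undefined reference for the linker.
	 */
	if (!IS_ENABLED(CONFIG_FOO))
		return false;

	return foo_query();
}
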
index f63489c882bb64f98f4c5bf9c84cbeb9df192c71..c0d668944dbe876260a926c0125f4ec1ab4afc52 100644 (file)
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003                0x3003
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 
+#define I2C_VENDOR_ID_RAYDIUM          0x2386
+#define I2C_PRODUCT_ID_RAYDIUM_4B33    0x4b33
+
 #define USB_VENDOR_ID_RAZER            0x1532
 #define USB_DEVICE_ID_RAZER_BLADE_14   0x011D
 
index 52c3b01917e7236821b804a35711b1e034da4788..8237dd86fb17fa9157fd4c97b6da734e34af9733 100644 (file)
@@ -107,7 +107,6 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
index 4aab96cf081861f863a008cda0a080425fdf31fa..3cde7c1b9c33cd673858d5089ed097e724fb12b0 100644 (file)
@@ -49,6 +49,7 @@
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
 #define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
+#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP                BIT(3)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -158,6 +159,8 @@ struct i2c_hid {
 
        bool                    irq_wake_enabled;
        struct mutex            reset_lock;
+
+       unsigned long           sleep_delay;
 };
 
 static const struct i2c_hid_quirks {
@@ -172,6 +175,8 @@ static const struct i2c_hid_quirks {
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
                I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
                I2C_HID_QUIRK_NO_RUNTIME_PM },
+       { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
+               I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
        { 0, 0 }
 };
 
@@ -387,6 +392,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
 {
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        int ret;
+       unsigned long now, delay;
 
        i2c_hid_dbg(ihid, "%s\n", __func__);
 
@@ -404,9 +410,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
                        goto set_pwr_exit;
        }
 
+       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+           power_state == I2C_HID_PWR_ON) {
+               now = jiffies;
+               if (time_after(ihid->sleep_delay, now)) {
+                       delay = jiffies_to_usecs(ihid->sleep_delay - now);
+                       usleep_range(delay, delay + 1);
+               }
+       }
+
        ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
                0, NULL, 0, NULL, 0);
 
+       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+           power_state == I2C_HID_PWR_SLEEP)
+               ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
+
        if (ret)
                dev_err(&client->dev, "failed to change power setting.\n");
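
The quirk above stores a deadline in jiffies when the device is put to sleep
and, on the next power-on, sleeps only for whatever remains of it. A
standalone sketch of the pattern (the 20 ms figure mirrors the quirk; the
other names are illustrative):

#include <linux/delay.h>
#include <linux/jiffies.h>

static unsigned long wake_deadline;

static void device_slept(void)
{
	/* earliest time at which the next power-on may be issued */
	wake_deadline = jiffies + msecs_to_jiffies(20);
}

static void device_wake(void)
{
	unsigned long now = jiffies;

	if (time_after(wake_deadline, now)) {
		unsigned long us = jiffies_to_usecs(wake_deadline - now);

		usleep_range(us, us + 1);
	}
	/* the power-on command can be sent now */
}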
 
index cac262a912c1248747d2814fa9e3b3d3512f8c76..89f2976f9c534c475da40c3933d2775e46787ae9 100644 (file)
@@ -330,6 +330,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                },
                .driver_data = (void *)&sipodev_desc
        },
+       {
+               .ident = "Direkt-Tek DTLAPY133-1",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
        {
                .ident = "Mediacom Flexbook Edge 11",
                .matches = {
index 23872d08308cdb5857d53b5bcdf907e20d74c345..a746017fac170ca15895435fd4df4fbe3a04d51f 100644 (file)
@@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
                        if (cmd == HIDIOCGCOLLECTIONINDEX) {
                                if (uref->usage_index >= field->maxusage)
                                        goto inval;
+                               uref->usage_index =
+                                       array_index_nospec(uref->usage_index,
+                                                          field->maxusage);
                        } else if (uref->usage_index >= field->report_count)
                                goto inval;
                }
 
-               if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-                   (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-                    uref->usage_index + uref_multi->num_values > field->report_count))
-                       goto inval;
+               if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+                       if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+                           uref->usage_index + uref_multi->num_values >
+                           field->report_count)
+                               goto inval;
+
+                       uref->usage_index =
+                               array_index_nospec(uref->usage_index,
+                                                  field->report_count -
+                                                  uref_multi->num_values);
+               }
 
                switch (cmd) {
                case HIDIOCGUSAGE:
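
Both hunks apply the standard Spectre-v1 hardening: after the bounds check
succeeds, the user-controlled index is clamped so it cannot be used
speculatively out of range. The pattern in isolation (names are illustrative):

#include <linux/nospec.h>

static int get_entry(const u32 *table, unsigned int nmemb,
		     unsigned int idx, u32 *out)
{
	if (idx >= nmemb)
		return -EINVAL;

	/* keep idx within [0, nmemb) even under speculative execution */
	idx = array_index_nospec(idx, nmemb);

	*out = table[idx];
	return 0;
}
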
index 975c951698846bffb8ae98ec4a70759e60a1f11b..84f61cec6319c8eb65ccc80553cbbdfde0542cc9 100644 (file)
@@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                if (info[i]->config[j] & HWMON_T_INPUT) {
                                        err = hwmon_thermal_add_sensor(dev,
                                                                hwdev, j);
-                                       if (err)
-                                               goto free_device;
+                                       if (err) {
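+                                               /* hdev's release callback
+                                                * frees hwdev, so skip the
+                                                * free_hwmon label to avoid
+                                                * a double free
+                                                */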
+                                               device_unregister(hdev);
+                                               goto ida_remove;
+                                       }
                                }
                        }
                }
@@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 
        return hdev;
 
-free_device:
-       device_unregister(hdev);
 free_hwmon:
        kfree(hwdev);
 ida_remove:
index 0ccca87f527191dc000649d1a0b1eaf44c87d35b..293dd1c6c7b36ef2b0770cf76e465aaea22b4673 100644 (file)
@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
        return sprintf(buf, "%s\n", sdata->label);
 }
 
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
 {
        int cpu;
 
@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
        return -ENOENT;
 }
 
-static void __init make_sensor_label(struct device_node *np,
-                                    struct sensor_data *sdata,
-                                    const char *label)
+static void make_sensor_label(struct device_node *np,
+                             struct sensor_data *sdata, const char *label)
 {
        u32 id;
        size_t n;
index 56ccb1ea7da5b405e904d90ac38b17303a3d2faa..f2c6819712013046246002346af928bd1ab16bc0 100644 (file)
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
          This driver can also be built as a module.  If so, the module
          will be called i2c-nforce2-s4985.
 
+config I2C_NVIDIA_GPU
+       tristate "NVIDIA GPU I2C controller"
+       depends on PCI
+       help
+         If you say yes to this option, support will be included for the
+         NVIDIA GPU I2C controller, which is used to communicate with the
+         GPU's Type-C controller. This driver can also be built as a module
+         called i2c-nvidia-gpu.
+
 config I2C_SIS5595
        tristate "SiS 5595"
        depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
 
 config I2C_OMAP
        tristate "OMAP I2C adapter"
-       depends on ARCH_OMAP
+       depends on ARCH_OMAP || ARCH_K3
        default y if MACH_OMAP_H3 || MACH_OMAP_OSK
        help
          If you say yes to this option, support will be included for the
index 18b26af82b1c5425a9dcec9c61cca3cdff694d60..5f0cb6915969aa98d5722b02e0fe9cb9a1ae25a7 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH)                += i2c-isch.o
 obj-$(CONFIG_I2C_ISMT)         += i2c-ismt.o
 obj-$(CONFIG_I2C_NFORCE2)      += i2c-nforce2.o
 obj-$(CONFIG_I2C_NFORCE2_S4985)        += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU)   += i2c-nvidia-gpu.o
 obj-$(CONFIG_I2C_PIIX4)                += i2c-piix4.o
 obj-$(CONFIG_I2C_SIS5595)      += i2c-sis5595.o
 obj-$(CONFIG_I2C_SIS630)       += i2c-sis630.o
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644 (file)
index 0000000..8822357
--- /dev/null
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL                           0x00
+#define I2C_MST_CNTL_GEN_START                 BIT(0)
+#define I2C_MST_CNTL_GEN_STOP                  BIT(1)
+#define I2C_MST_CNTL_CMD_READ                  (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE                 (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT          6
+#define I2C_MST_CNTL_GEN_NACK                  BIT(28)
+#define I2C_MST_CNTL_STATUS                    GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY               (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK             (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT            (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY           (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER             BIT(31)
+
+#define I2C_MST_ADDR                           0x04
+
+#define I2C_MST_I2C0_TIMING                            0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ          0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT            16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX                255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK              BIT(24)
+
+#define I2C_MST_DATA                                   0x0c
+
+#define I2C_MST_HYBRID_PADCTL                          0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C                 BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV                BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV                BIT(15)
+
+struct gpu_i2c_dev {
+       struct device *dev;
+       void __iomem *regs;
+       struct i2c_adapter adapter;
+       struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+       u32 val;
+
+       /* enable I2C */
+       val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+       val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+               I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+               I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+       writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+       /* enable 100 kHz mode */
+       val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+       val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+           << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+       val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+       writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+       unsigned long target = jiffies + msecs_to_jiffies(1000);
+       u32 val;
+
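+       /* poll until the cycle trigger clears or the status leaves
+        * BUS_BUSY, for at most one second
+        */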
+       do {
+               val = readl(i2cd->regs + I2C_MST_CNTL);
+               if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+                       break;
+               if ((val & I2C_MST_CNTL_STATUS) !=
+                               I2C_MST_CNTL_STATUS_BUS_BUSY)
+                       break;
+               usleep_range(500, 600);
+       } while (time_is_after_jiffies(target));
+
+       if (time_is_before_jiffies(target)) {
+               dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+               return -ETIME;
+       }
+
+       val = readl(i2cd->regs + I2C_MST_CNTL);
+       switch (val & I2C_MST_CNTL_STATUS) {
+       case I2C_MST_CNTL_STATUS_OKAY:
+               return 0;
+       case I2C_MST_CNTL_STATUS_NO_ACK:
+               return -EIO;
+       case I2C_MST_CNTL_STATUS_TIMEOUT:
+               return -ETIME;
+       default:
+               return 0;
+       }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+       int status;
+       u32 val;
+
+       val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+               (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+               I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+       writel(val, i2cd->regs + I2C_MST_CNTL);
+
+       status = gpu_i2c_check_status(i2cd);
+       if (status < 0)
+               return status;
+
+       val = readl(i2cd->regs + I2C_MST_DATA);
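+       /* the controller shifts each received byte into the low bits of
+        * the data register, leaving the first byte most significant,
+        * hence the per-length big-endian unpacking below
+        */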
+       switch (len) {
+       case 1:
+               data[0] = val;
+               break;
+       case 2:
+               put_unaligned_be16(val, data);
+               break;
+       case 3:
+               put_unaligned_be16(val >> 8, data);
+               data[2] = val;
+               break;
+       case 4:
+               put_unaligned_be32(val, data);
+               break;
+       default:
+               break;
+       }
+       return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+       writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+       writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+       u32 val;
+
+       writel(data, i2cd->regs + I2C_MST_DATA);
+
+       val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+       writel(val, i2cd->regs + I2C_MST_CNTL);
+
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+                              struct i2c_msg *msgs, int num)
+{
+       struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+       int status, status2;
+       int i, j;
+
+       /*
+        * The controller supports at most a 4-byte read because of a known
+        * limitation: it generates a STOP condition after every read.
+        */
+       for (i = 0; i < num; i++) {
+               if (msgs[i].flags & I2C_M_RD) {
+                       /* program client address before starting read */
+                       writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+                       /* gpu_i2c_read has implicit start */
+                       status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+                       if (status < 0)
+                               goto stop;
+               } else {
+                       u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+                       status = gpu_i2c_start(i2cd);
+                       if (status < 0) {
+                               if (i == 0)
+                                       return status;
+                               goto stop;
+                       }
+
+                       status = gpu_i2c_write(i2cd, addr);
+                       if (status < 0)
+                               goto stop;
+
+                       for (j = 0; j < msgs[i].len; j++) {
+                               status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+                               if (status < 0)
+                                       goto stop;
+                       }
+               }
+       }
+       status = gpu_i2c_stop(i2cd);
+       if (status < 0)
+               return status;
+
+       return i;
+stop:
+       status2 = gpu_i2c_stop(i2cd);
+       if (status2 < 0)
+               dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+       return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+       .max_read_len = 4,
+       .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+       .master_xfer    = gpu_i2c_master_xfer,
+       .functionality  = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with a USB Type-C interface.
+ * The cards are identified by vendor ID and class code only, to avoid
+ * having to add a new product ID for every card that needs this driver.
+ * There is currently no class code defined for a UCSI device over PCI,
+ * so the UNKNOWN class is used for now; it will be updated once UCSI
+ * over PCI gets a class code.
+ * No other NVIDIA cards use the UNKNOWN class code. Even if the driver
+ * gets loaded for an undesired card, an i2c_read() (initiated from the
+ * UCSI i2c_client) or the UCSI commands will eventually time out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN       0x0c80
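+/* pci_device_id.class is (base << 16 | subclass << 8 | prog-if); the
+ * 0xffffff00 mask below matches base 0x0c / subclass 0x80 with any
+ * programming interface
+ */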
+static const struct pci_device_id gpu_i2c_ids[] = {
+       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+               PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+       { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+       struct i2c_client *ccgx_client;
+
+       i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+                                          sizeof(*i2cd->gpu_ccgx_ucsi),
+                                          GFP_KERNEL);
+       if (!i2cd->gpu_ccgx_ucsi)
+               return -ENOMEM;
+
+       strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+               sizeof(i2cd->gpu_ccgx_ucsi->type));
+       i2cd->gpu_ccgx_ucsi->addr = 0x8;
+       i2cd->gpu_ccgx_ucsi->irq = irq;
+       ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+       if (!ccgx_client)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct gpu_i2c_dev *i2cd;
+       int status;
+
+       i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+       if (!i2cd)
+               return -ENOMEM;
+
+       i2cd->dev = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, i2cd);
+
+       status = pcim_enable_device(pdev);
+       if (status < 0) {
+               dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+               return status;
+       }
+
+       pci_set_master(pdev);
+
+       i2cd->regs = pcim_iomap(pdev, 0, 0);
+       if (!i2cd->regs) {
+               dev_err(&pdev->dev, "pcim_iomap failed\n");
+               return -ENOMEM;
+       }
+
+       status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+       if (status < 0) {
+               dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+               return status;
+       }
+
+       gpu_enable_i2c_bus(i2cd);
+
+       i2c_set_adapdata(&i2cd->adapter, i2cd);
+       i2cd->adapter.owner = THIS_MODULE;
+       strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+               sizeof(i2cd->adapter.name));
+       i2cd->adapter.algo = &gpu_i2c_algorithm;
+       i2cd->adapter.quirks = &gpu_i2c_quirks;
+       i2cd->adapter.dev.parent = &pdev->dev;
+       status = i2c_add_adapter(&i2cd->adapter);
+       if (status < 0)
+               goto free_irq_vectors;
+
+       status = gpu_populate_client(i2cd, pdev->irq);
+       if (status < 0) {
+               dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+               goto del_adapter;
+       }
+
+       return 0;
+
+del_adapter:
+       i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+       pci_free_irq_vectors(pdev);
+       return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+       struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+       i2c_del_adapter(&i2cd->adapter);
+       pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+       struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+       gpu_enable_i2c_bus(i2cd);
+       return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+       .name           = "nvidia-gpu",
+       .id_table       = gpu_i2c_ids,
+       .probe          = gpu_i2c_probe,
+       .remove         = gpu_i2c_remove,
+       .driver         = {
+               .pm     = &gpu_i2c_driver_pm,
+       },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
+MODULE_LICENSE("GPL v2");
index 527f55c8c4c70e560a9787a610c68017fbb10235..db075bc0d9525d62a7b366abd7c6eb1edeaaa76c 100644 (file)
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
 
-       ret = i2c_add_adapter(&gi2c->adap);
-       if (ret) {
-               dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
-               return ret;
-       }
-
        gi2c->suspended = 1;
        pm_runtime_set_suspended(gi2c->se.dev);
        pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
        pm_runtime_use_autosuspend(gi2c->se.dev);
        pm_runtime_enable(gi2c->se.dev);
 
+       ret = i2c_add_adapter(&gi2c->adap);
+       if (ret) {
+               dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+               pm_runtime_disable(gi2c->se.dev);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
 {
        struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
 
-       pm_runtime_disable(gi2c->se.dev);
        i2c_del_adapter(&gi2c->adap);
+       pm_runtime_disable(gi2c->se.dev);
        return 0;
 }
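
Both hunks enforce that the adapter only becomes visible once runtime PM is
set up (registration can trigger transfers immediately), and that remove
unwinds in the reverse order of probe. A skeleton of that ordering
(illustrative names):

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static struct i2c_adapter adap;

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* ... hardware setup ... */
	pm_runtime_enable(&pdev->dev);

	ret = i2c_add_adapter(&adap);	/* transfers may start right away */
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	i2c_del_adapter(&adap);		/* stop all users first */
	pm_runtime_disable(&pdev->dev);	/* then unwind, in reverse */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= { .name = "foo" },
};
module_platform_driver(foo_driver);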
 
index ce7acd115dd8da7578b4fc8d3fc3733692d90743..1870cf87afe1ef7993b6e2cda9a28aeede31f268 100644 (file)
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
 {
        struct pattern_trig_data *data = from_timer(data, t, timer);
 
-       mutex_lock(&data->lock);
-
        for (;;) {
                if (!data->is_indefinite && !data->repeat)
                        break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
                                           data->curr->brightness);
                        mod_timer(&data->timer,
                                  jiffies + msecs_to_jiffies(data->curr->delta_t));
-
-                       /* Skip the tuple with zero duration */
-                       pattern_trig_update_patterns(data);
+                       if (!data->next->delta_t) {
+                               /* Skip the tuple with zero duration */
+                               pattern_trig_update_patterns(data);
+                       }
                        /* Select next tuple */
                        pattern_trig_update_patterns(data);
                } else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
 
                break;
        }
-
-       mutex_unlock(&data->lock);
 }
 
 static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
        if (res < -1 || res == 0)
                return -EINVAL;
 
-       /*
-        * Clear previous patterns' performence firstly, and remove the timer
-        * without mutex lock to avoid dead lock.
-        */
-       del_timer_sync(&data->timer);
-
        mutex_lock(&data->lock);
 
+       del_timer_sync(&data->timer);
+
        if (data->is_hw_pattern)
                led_cdev->pattern_clear(led_cdev);
 
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
        struct pattern_trig_data *data = led_cdev->trigger_data;
        int ccount, cr, offset = 0, err = 0;
 
-       /*
-        * Clear previous patterns' performence firstly, and remove the timer
-        * without mutex lock to avoid dead lock.
-        */
-       del_timer_sync(&data->timer);
-
        mutex_lock(&data->lock);
 
+       del_timer_sync(&data->timer);
+
        if (data->is_hw_pattern)
                led_cdev->pattern_clear(led_cdev);
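
Taken together these hunks fix a sleeping-in-atomic bug: the timer callback
runs in softirq context, where a mutex (a sleeping lock) must not be taken,
so the locking is removed from the callback; once the callback no longer
touches the mutex, del_timer_sync() can safely run while the mutex is held.
A minimal sketch of the resulting discipline (illustrative names):

#include <linux/mutex.h>
#include <linux/timer.h>

static DEFINE_MUTEX(pattern_lock);
static struct timer_list pattern_timer;

static void pattern_timer_fn(struct timer_list *t)
{
	/* softirq context: taking pattern_lock here would trigger
	 * "BUG: sleeping function called from invalid context"
	 */
}

static void pattern_init(void)
{
	timer_setup(&pattern_timer, pattern_timer_fn, 0);
}

static void pattern_store(void)
{
	mutex_lock(&pattern_lock);
	/* safe only because pattern_timer_fn() never blocks on the lock */
	del_timer_sync(&pattern_timer);
	/* ... update the pattern state ... */
	mutex_unlock(&pattern_lock);
}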
 
index e514d57a0419defecb8dcbbc8be4604aea1321da..aa983422aa970f1035201a1a4841b7a09d9acc3f 100644 (file)
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
 config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
        select BCH
-       select BCH_CONST_PARAMS
+       select BCH_CONST_PARAMS if !MTD_NAND_BCH
        select BITREVERSE
        help
          This provides an MTD device driver for the M-Systems DiskOnChip
index 784c6e1a0391e92c90723e698d8bc148fe3e4916..fd5fe12d74613ecebddb88699dcae5e1862d3829 100644 (file)
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
                info->mtd = info->subdev[0].mtd;
                ret = 0;
        } else if (info->num_subdev > 1) {
-               struct mtd_info *cdev[nr];
+               struct mtd_info **cdev;
+
+               cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+               if (!cdev) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
                /*
                 * We detected multiple devices.  Concatenate them together.
                 */
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
 
                info->mtd = mtd_concat_create(cdev, info->num_subdev,
                                              plat->name);
+               kfree(cdev);
                if (info->mtd == NULL) {
                        ret = -ENXIO;
                        goto err;
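
This is the stock conversion from the tree-wide VLA removal: an on-stack
array whose size is only known at run time becomes a bounded heap allocation
with an explicit failure path. The transform in isolation (sketch):

#include <linux/mtd/mtd.h>
#include <linux/slab.h>

static int concat_subdevs(struct mtd_info **subdev, unsigned int nr)
{
	/* before the fix this was a VLA: struct mtd_info *cdev[nr]; */
	struct mtd_info **cdev;
	unsigned int i;

	cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	for (i = 0; i < nr; i++)
		cdev[i] = subdev[i];

	/* ... hand cdev to the consumer, e.g. mtd_concat_create() ... */

	kfree(cdev);
	return 0;
}
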
index 05bd0779fe9bf7eae08acca31b7ba30f7592b9b1..71050a0b31dfe3b6bf273ff4c240e7cfe307080f 100644 (file)
@@ -590,7 +590,6 @@ retry:
 
 /**
  * panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
  * @chip: NAND chip structure
  * @timeo: timeout
  *
index e24db817154ee73ad1fc0fd9586f4e294fc2886a..d846428ef038e6b76f84591f73d40e9a4d30db33 100644 (file)
@@ -996,7 +996,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
 err_unmap:
        dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
 
-       return 0;
+       return ret;
 }
 
 static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
index 9407ca5f9443338d56a355fe0eadfa6641a429dc..3e54e31889c7b53bbba362a5bd497279dc91b128 100644 (file)
@@ -3250,12 +3250,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
                memcpy(&sfdp_params, params, sizeof(sfdp_params));
                memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
 
-               if (spi_nor_parse_sfdp(nor, &sfdp_params))
+               if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+                       nor->addr_width = 0;
                        /* restore previous erase map */
                        memcpy(&nor->erase_map, &prev_map,
                               sizeof(nor->erase_map));
-               else
+               } else {
                        memcpy(params, &sfdp_params, sizeof(*params));
+               }
        }
 
        return 0;
index ffa37adb76817f454505b32d010056dfc4d20dc8..333387f1f1fe66490cda8904a7d6c7aeb2d15287 100644 (file)
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
        case NETDEV_CHANGE:
                /* For 802.3ad mode only:
                 * Getting invalid Speed/Duplex values here will put slave
-                * in weird state. So mark it as link-down for the time
+                * in weird state. So mark it as link-fail for the time
                 * being and let link-monitoring (miimon) set it right when
                 * correct speeds/duplex are available.
                 */
                if (bond_update_speed_duplex(slave) &&
                    BOND_MODE(bond) == BOND_MODE_8023AD)
-                       slave->link = BOND_LINK_DOWN;
+                       slave->link = BOND_LINK_FAIL;
 
                if (BOND_MODE(bond) == BOND_MODE_8023AD)
                        bond_3ad_adapter_speed_duplex_changed(slave);
index 54e0ca6ed7308c511ce42bc6ea3dc6e65fb0662b..86b6464b4525c426e09d4d6a9f98bf9a0ee49111 100644 (file)
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
 {
        int i;
 
-       mutex_init(&dev->reg_mutex);
-       mutex_init(&dev->stats_mutex);
-       mutex_init(&dev->alu_mutex);
-       mutex_init(&dev->vlan_mutex);
-
        dev->ds->ops = &ksz_switch_ops;
 
        for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
        if (dev->pdata)
                dev->chip_id = dev->pdata->chip_id;
 
+       mutex_init(&dev->reg_mutex);
+       mutex_init(&dev->stats_mutex);
+       mutex_init(&dev->alu_mutex);
+       mutex_init(&dev->vlan_mutex);
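+       /* the locks must be ready before ksz_switch_detect() makes its
+        * first register accesses below
+        */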
+
        if (ksz_switch_detect(dev))
                return -EINVAL;
 
index d721ccf7d8bed8230fa5fbbac2ecfd5068680cac..38e399e0f30e16cd189cf48874f08e6289565101 100644 (file)
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
        if (err)
                return err;
 
+       /* Keep the histogram mode bits */
+       val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
        val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
 
        err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
index 6a633c70f603d7b70c3819b20f73d4b4f1405e64..99ef1daaa4d8027636cc5b0f7f542b7961f6764e 100644 (file)
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
                                      struct ethtool_pauseparam *pause)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       u32 fc = aq_nic->aq_nic_cfg.flow_control;
 
        pause->autoneg = 0;
 
-       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-               pause->rx_pause = 1;
-       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
-               pause->tx_pause = 1;
+       pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+       pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
 }
 
 static int aq_ethtool_set_pauseparam(struct net_device *ndev,
index e8689241204e9086fdb2c8750402ff7467e129ac..a1e70da358ca6910f02a82a6ef2e3949f60ee4fe 100644 (file)
@@ -204,6 +204,10 @@ struct aq_hw_ops {
 
        int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
+       int (*hw_set_offload)(struct aq_hw_s *self,
+                             struct aq_nic_cfg_s *aq_nic_cfg);
+
+       int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
 };
 
 struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
 
        int (*update_stats)(struct aq_hw_s *self);
 
+       u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
        int (*set_flow_control)(struct aq_hw_s *self);
 
        int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
index e3ae29e523f0e26738b0ab80a2f3ac431083d13b..7c07eef275eb8498ade72b676dc2eeda8532185e 100644 (file)
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
        bool is_lro = false;
+       int err = 0;
+
+       aq_cfg->features = features;
 
-       if (aq_cfg->hw_features & NETIF_F_LRO) {
+       if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
                is_lro = features & NETIF_F_LRO;
 
                if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
                        }
                }
        }
+       if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+               err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+                                                       aq_cfg);
 
-       return 0;
+       return err;
 }
 
 static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
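
The (ndev->features ^ features) test is the usual way for a feature callback
to isolate the bits that actually changed before touching the hardware. The
idiom by itself, as it would sit behind .ndo_set_features
(reprogram_rx_csum() is a hypothetical helper):

#include <linux/netdevice.h>

static int reprogram_rx_csum(struct net_device *ndev, bool on); /* hypothetical */

static int foo_set_features(struct net_device *ndev,
			    netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		return reprogram_rx_csum(ndev,
					 !!(features & NETIF_F_RXCSUM));

	return 0;
}
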
index 5fed244466871cd69b764ffdfcf55d92a9043763..7abdc0952425921330d3639c99824fe5ae7c0e00 100644 (file)
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
        }
 
        cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
-       cfg->hw_features = cfg->aq_hw_caps->hw_features;
+       cfg->features = cfg->aq_hw_caps->hw_features;
 }
 
 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
        int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+       u32 fc = 0;
 
        if (err)
                return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
                        AQ_CFG_DRV_NAME, self->link_status.mbps,
                        self->aq_hw->aq_link_status.mbps);
                aq_nic_update_interrupt_moderation_settings(self);
+
+               /* The driver has to update the flow control settings on the
+                * RX block on any link event, so query the FW for the flow
+                * control mode it negotiated.
+                */
+               if (self->aq_fw_ops->get_flow_control)
+                       self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+               if (self->aq_hw_ops->hw_set_fc)
+                       self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
        }
 
        self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
                }
        }
 
-       if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+       if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_MULTICAST;
                self->mc_list.count = i;
                self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Pause);
 
-       if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+       /* Asym is when either RX or TX, but not both */
+       /* Asym_Pause is advertised when exactly one of RX and TX is enabled */
+           !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Asym_Pause);
 
index c1582f4e8e1b503f7f5baf60dcd1408e99d19620..44ec47a3d60a57bee0c9b0a62907eb1988ee5a5b 100644 (file)
@@ -23,7 +23,7 @@ struct aq_vec_s;
 
 struct aq_nic_cfg_s {
        const struct aq_hw_caps_s *aq_hw_caps;
-       u64 hw_features;
+       u64 features;
        u32 rxds;               /* rx ring size, descriptors # */
        u32 txds;               /* tx ring size, descriptors # */
        u32 vecs;               /* vecs==allocated irqs */
index 3db91446cc67717b548333e4bd3b95db8281c152..74550ccc7a20ff8437463384e906b718027dc6ef 100644 (file)
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
        return !!budget;
 }
 
+static void aq_rx_checksum(struct aq_ring_s *self,
+                          struct aq_ring_buff_s *buff,
+                          struct sk_buff *skb)
+{
+       if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+               return;
+
+       if (unlikely(buff->is_cso_err)) {
+               ++self->stats.rx.errors;
+               skb->ip_summed = CHECKSUM_NONE;
+               return;
+       }
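+       /* the first __skb_incr_checksum_unnecessary() marks the skb
+        * CHECKSUM_UNNECESSARY; the second bumps csum_level to record
+        * that the L4 checksum was verified as well
+        */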
+       if (buff->is_ip_cso) {
+               __skb_incr_checksum_unnecessary(skb);
+               if (buff->is_udp_cso || buff->is_tcp_cso)
+                       __skb_incr_checksum_unnecessary(skb);
+       } else {
+               skb->ip_summed = CHECKSUM_NONE;
+       }
+}
+
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                }
 
                skb->protocol = eth_type_trans(skb, ndev);
-               if (unlikely(buff->is_cso_err)) {
-                       ++self->stats.rx.errors;
-                       skb->ip_summed = CHECKSUM_NONE;
-               } else {
-                       if (buff->is_ip_cso) {
-                               __skb_incr_checksum_unnecessary(skb);
-                               if (buff->is_udp_cso || buff->is_tcp_cso)
-                                       __skb_incr_checksum_unnecessary(skb);
-                       } else {
-                               skb->ip_summed = CHECKSUM_NONE;
-                       }
-               }
+
+               aq_rx_checksum(self, buff, skb);
 
                skb_set_hash(skb, buff->rss_hash,
                             buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
index 76d25d594a0f62fedf507e50b58701933b1872c2..f02592f43fe36f3af460672746ade204d1f574fb 100644 (file)
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
        return err;
 }
 
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+       hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+       return 0;
+}
+
 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 {
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;
-       bool is_rx_flow_control = false;
 
        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 
        /* QoS Rx buf size per TC */
        tc = 0;
-       is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
        buff_size = HW_ATL_B0_RXBUF_MAX;
 
        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);
-       hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+       hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
 
        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
        /* RX checksums offloads*/
-       hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
-       hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+       hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+                                                NETIF_F_RXCSUM));
+       hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+                                             NETIF_F_RXCSUM));
 
        /* LSO offloads*/
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
 
-               unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
+               u8 rx_stat = 0U;
 
                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
                buff = &ring->buff_ring[ring->hw_head];
 
-               is_err = (0x0000003CU & rxd_wb->status);
+               rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
                is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
-               is_err &= ~0x20U; /* exclude validity bit */
 
                pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
-               if (is_rx_check_sum_enabled) {
-                       if (0x0U == (pkt_type & 0x3U))
-                               buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+               if (is_rx_check_sum_enabled & BIT(19) &&
+                   (0x0U == (pkt_type & 0x3U)))
+                       buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
 
+               if (is_rx_check_sum_enabled & BIT(20)) {
                        if (0x4U == (pkt_type & 0x1CU))
-                               buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+                               buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+                                                  !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
-                               buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
-                       /* Checksum offload workaround for small packets */
-                       if (rxd_wb->pkt_len <= 60) {
-                               buff->is_ip_cso = 0U;
-                               buff->is_cso_err = 0U;
-                       }
+                               buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+                                                  !!(rx_stat & BIT(3));
+               }
+               buff->is_cso_err = !!(rx_stat & 0x6);
+               /* Checksum offload workaround for small packets */
+               if (unlikely(rxd_wb->pkt_len <= 60)) {
+                       buff->is_ip_cso = 0U;
+                       buff->is_cso_err = 0U;
                }
-
-               is_err &= ~0x18U;
 
                dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
 
-               if (is_err || rxd_wb->type & 0x1000U) {
-                       /* status error or DMA error */
+               if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+                       /* MAC error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+       /* Invalidate the descriptor cache to prevent the hardware from
+        * writing to cached descriptors or to the data pointers they hold
+        */
+       hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
        return aq_hw_err_from_flags(self);
 }
 
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
+       .hw_set_offload              = hw_atl_b0_hw_offload_set,
+       .hw_set_fc                   = hw_atl_b0_set_fc,
 };
index be0a3a90dfad6ac157ba81ab238ca43631e6df82..5502ec5f0f6993502cd8d4e880032a9606da5c34 100644 (file)
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
                            HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+       aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+                           HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+                           HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+                           init);
+}
+
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc, u32 buffer)
 {
index 7056c7342afcf2bf426a2e261cacd79b405c75a2..41f239928c157f121b74f086c35c86457f2a3aba 100644 (file)
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc,
                                            u32 buffer);
 
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
                                      u32 buffer);
index 716674a9b729efc741ef4b434a20dd9dbd415b88..a715fa317b1c822781b4a0422033816b5684e7e3 100644 (file)
 /* default value of bitfield desc{d}_reset */
 #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield  rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
index 096ca5730887c1d4a47861c3a6a456cd240b2bd1..7de3220d9cab7bf99109145c1ccef4a3ea898d87 100644 (file)
@@ -30,6 +30,8 @@
 #define HW_ATL_FW2X_MPI_STATE_ADDR     0x370
 #define HW_ATL_FW2X_MPI_STATE2_ADDR    0x374
 
+#define HW_ATL_FW2X_CAP_PAUSE            BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE       BIT(CAPS_HI_ASYMMETRIC_PAUSE)
 #define HW_ATL_FW2X_CAP_SLEEP_PROXY      BIT(CAPS_HI_SLEEP_PROXY)
 #define HW_ATL_FW2X_CAP_WOL              BIT(CAPS_HI_WOL)
 
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
        return 0;
 }
 
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+       u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
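+       /* resolve the PAUSE/ASYM_PAUSE pair negotiated by firmware:
+        * both set -> RX only, PAUSE alone -> symmetric RX and TX,
+        * ASYM_PAUSE alone -> TX only, neither -> no flow control
+        */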
+       if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+               if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+                       *fcmode = AQ_NIC_FC_RX;
+               else
+                       *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+       else
+               if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+                       *fcmode = AQ_NIC_FC_TX;
+               else
+                       *fcmode = 0;
+
+       return 0;
+}
+
 const struct aq_fw_ops aq_fw_2x_ops = {
        .init = aq_fw2x_init,
        .deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
        .set_eee_rate = aq_fw2x_set_eee_rate,
        .get_eee_rate = aq_fw2x_get_eee_rate,
        .set_flow_control = aq_fw2x_set_flow_control,
+       .get_flow_control = aq_fw2x_get_flow_control
 };
index 78c5de467426f1e4276cebe8ee81cc0091d4c6fa..9d0e74f6b089df4c304ab49c2749d8bbba47c2e7 100644 (file)
@@ -140,6 +140,5 @@ struct alx_priv {
 };
 
 extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
 
 #endif
index 7968c644ad8617fef2fec1360e869a622c525a02..c131cfc1b79df5a62e048bbf1d15d070e7c0fced 100644 (file)
@@ -49,7 +49,7 @@
 #include "hw.h"
 #include "reg.h"
 
-const char alx_drv_name[] = "alx";
+static const char alx_drv_name[] = "alx";
 
 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
 {
index 4122553e224b294d4eff1828201e467fcc5a60b9..0e2d99c737e35192b90d0bf3ce541ef2d6ecd4d1 100644 (file)
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
                intrl2_1_mask_clear(priv, 0xffffffff);
        else
                intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
-       /* Last call before we start the real business */
-       netif_tx_start_all_queues(dev);
 }
 
 static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
 
        bcm_sysport_netif_start(dev);
 
+       netif_tx_start_all_queues(dev);
+
        return 0;
 
 out_clear_rx_int:
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
 
        /* stop all software from updating hardware */
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(dev);
        napi_disable(&priv->napi);
        cancel_work_sync(&priv->dim.dim.work);
        phy_stop(dev->phydev);
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       netif_device_detach(dev);
+
        bcm_sysport_netif_stop(dev);
 
        phy_suspend(dev->phydev);
 
-       netif_device_detach(dev);
-
        /* Disable UniMAC RX */
        umac_enable_set(priv, CMD_RX_EN, 0);
 
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
                goto out_free_rx_ring;
        }
 
-       netif_device_attach(dev);
-
        /* RX pipe enable */
        topctrl_writel(priv, 0, RX_FLUSH_CNTL);
 
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
        bcm_sysport_netif_start(dev);
 
+       netif_device_attach(dev);
+
        return 0;
 
 out_free_rx_ring:
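
The systemport and GENET changes (here and in the hunks below) apply the same
ordering rules: detach the net device before stopping the datapath on
suspend, re-attach it only after the datapath is fully restarted on resume,
and use netif_tx_disable() instead of netif_tx_stop_all_queues(), since the
former takes each queue's xmit lock and therefore waits out any transmit
still in flight. The suspend side in outline (illustrative):

#include <linux/netdevice.h>

static int foo_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);	/* stack stops queueing new work */
	netif_tx_disable(dev);		/* serializes against running xmit */
	/* ... stop NAPI, the PHY and DMA, then power down ... */

	return 0;
}
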
index 20c1681bb1afeea35e23f20242abc0fe34fd1304..2d6f090bf6440cc7253fe4f0764b10bde618ff73 100644 (file)
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
 
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
 
-       netif_tx_start_all_queues(dev);
        bcmgenet_enable_tx_napi(priv);
 
        /* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
 
        bcmgenet_netif_start(dev);
 
+       netif_tx_start_all_queues(dev);
+
        return 0;
 
 err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        bcmgenet_disable_tx_napi(priv);
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(dev);
 
        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       netif_device_detach(dev);
+
        bcmgenet_netif_stop(dev);
 
        if (!device_may_wakeup(d))
                phy_suspend(dev->phydev);
 
-       netif_device_detach(dev);
-
        /* Prepare the device for Wake-on-LAN and switch to the slow clock */
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);
 
-       netif_device_attach(dev);
-
        if (!device_may_wakeup(d))
                phy_resume(dev->phydev);
 
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
 
        bcmgenet_netif_start(dev);
 
+       netif_device_attach(dev);
+
        return 0;
 
 out_clk_disable:
index 3f96aa30068ec3dcf991bec4ed022b3e68d013e7..20fcf0d1c2ce5f8ec986928019aefbd209088731 100644 (file)
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        /* Hardware table is only clear when pf resets */
        if (!(handle->flags & HNAE3_SUPPORT_VF)) {
                ret = hns3_restore_vlan(netdev);
-               return ret;
+               if (ret)
+                       return ret;
        }
 
        ret = hns3_restore_fd_rules(netdev);
index aa5cb9834d73a807dd18661c10069b7c929cc6d6..494e562fe8c7e9f2322b9659b8e60ec3abce26f0 100644 (file)
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
  */
 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
 {
-       struct hclge_vport *vport = hdev->vport;
-       u32 i, k, qs_bitmap;
-       int ret;
+       int i;
 
        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
-               qs_bitmap = 0;
+               u32 qs_bitmap = 0;
+               int k, ret;
 
                for (k = 0; k < hdev->num_alloc_vport; k++) {
+                       struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;
 
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
-
-                       vport++;
                }
 
                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
index 7893beffcc714215a5ed47fc8658b9db66ee9d3a..c9d5d0a7fbf172b14d70b33e62cb3344622b2ce7 100644 (file)
@@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 
-       if (adapter->vlan_header_insertion) {
+       if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }
index bc71a21c1dc2cfe7215a3ea124efd2dc11ccbf99..21c2688d63082ec25bb6f72e921d39ac3beedee8 100644 (file)
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                          NETIF_F_GSO_GRE               |
                          NETIF_F_GSO_GRE_CSUM          |
                          NETIF_F_GSO_PARTIAL           |
+                         NETIF_F_GSO_IPXIP4            |
+                         NETIF_F_GSO_IPXIP6            |
                          NETIF_F_GSO_UDP_TUNNEL        |
                          NETIF_F_GSO_UDP_TUNNEL_CSUM   |
                          NETIF_F_SCTP_CRC              |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        /* record features VLANs can make use of */
        netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
-       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-               netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
        hw_features = hw_enc_features           |
                      NETIF_F_HW_VLAN_CTAG_TX   |
                      NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
        netdev->hw_features |= hw_features;
 
        netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
index 4c4b5717a627de6353f8f4248163f1f7e333714f..b8548370f1c722e61817c4857adae71f1cce18f0 100644 (file)
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
 #define ICE_MIN_INTR_PER_VF            (ICE_MIN_QS_PER_VF + 1)
 #define ICE_DFLT_INTR_PER_VF           (ICE_DFLT_QS_PER_VF + 1)
 
+#define ICE_MAX_RESET_WAIT             20
+
 #define ICE_VSIQF_HKEY_ARRAY_SIZE      ((VSIQF_HKEY_MAX_INDEX + 1) *   4)
 
 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
        u64 tx_linearize;
        DECLARE_BITMAP(state, __ICE_STATE_NBITS);
        DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
-       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        unsigned int current_netdev_flags;
        u32 tx_restart;
        u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
 
 #endif /* _ICE_H_ */
index 8cd6a2401fd9f2c1a804ba4ee3920e0309c87a4a..554fd707a6d69f45f165a6b77ef23bb027ac6baa 100644 (file)
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
        /* Attempt to disable FW logging before shutting down control queues */
        ice_cfg_fw_log(hw, false);
        ice_shutdown_all_ctrlq(hw);
+
+       /* Clear VSI contexts if not already cleared */
+       ice_clear_all_vsi_ctx(hw);
 }
 
 /**
index 96923580f2a6c2fdb88c88c2f1e88f0b4154f67c..648acdb4c644b6c62d08d8f99c4307e1537d041f 100644 (file)
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
        }
 
        if (!test_bit(__ICE_DOWN, pf->state)) {
-               /* Give it a little more time to try to come back */
+               /* Give it a little more time to try to come back. If still
+                * down, restart autoneg link or reinitialize the interface.
+                */
                msleep(75);
                if (!test_bit(__ICE_DOWN, pf->state))
                        return ice_nway_reset(netdev);
+
+               ice_down(vsi);
+               ice_up(vsi);
        }
 
        return err;
index 5fdea6ec7675b6d82b8c71c5bbe133b84280c79d..596b9fb1c510dec854004dd0bce6ef68a74ba8c6 100644 (file)
 #define GLNVM_ULD                              0x000B6008
 #define GLNVM_ULD_CORER_DONE_M                 BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M                 BIT(4)
+#define GLPCI_CNF2                             0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M            BIT(1)
 #define PF_FUNC_RID                            0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S                 0
 #define PF_FUNC_RID_FUNC_NUM_M                 ICE_M(0x7, 0)
index 5bacad01f0c9c1c8cce2b4f56caca05b034688c2..1041fa2a7767878590930f1851c394eb67da80c5 100644 (file)
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
        status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
        if (status) {
                netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
-                          ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+                          ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
                           vsi->back->hw.adminq.sq_last_status);
                goto err_out;
        }
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
         * on this wq
         */
        if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+               ice_napi_del(vsi);
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
index 05993451147a09333602f79846f9d39e208d3037..333312a1d59572dfe8cef5bc02d745ab7c2920cf 100644 (file)
@@ -1465,7 +1465,7 @@ skip_req_irq:
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
  */
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
 {
        int v_idx;
 
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
-       int ret;
 
        if (vid >= VLAN_N_VID) {
                netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 
        /* Enable VLAN pruning when VLAN 0 is added */
        if (unlikely(!vid)) {
-               ret = ice_cfg_vlan_pruning(vsi, true);
+               int ret = ice_cfg_vlan_pruning(vsi, true);
+
                if (ret)
                        return ret;
        }
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
         * needed to continue allowing all untagged packets since VLAN prune
         * list is applied to all packets by the switch
         */
-       ret = ice_vsi_add_vlan(vsi, vid);
-
-       if (!ret)
-               set_bit(vid, vsi->active_vlans);
-
-       return ret;
+       return ice_vsi_add_vlan(vsi, vid);
 }
 
 /**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
        if (status)
                return status;
 
-       clear_bit(vid, vsi->active_vlans);
-
        /* Disable VLAN pruning when VLAN 0 is removed */
        if (unlikely(!vid))
                status = ice_cfg_vlan_pruning(vsi, false);
@@ -2001,6 +1994,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
        return 0;
 }
 
+/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+       if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+               dev_warn(&pf->pdev->dev,
+                        "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+                        ICE_CACHE_LINE_BYTES);
+}
+
 /**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
        /* since everything is good, start the service timer */
        mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 
+       ice_verify_cacheline_size(pf);
+
        return 0;
 
 err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
        if (!pf)
                return;
 
+       for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+               if (!ice_is_reset_in_progress(pf->state))
+                       break;
+               msleep(100);
+       }
+
        set_bit(__ICE_DOWN, pf->state);
        ice_service_task_stop(pf);
 
@@ -2509,31 +2526,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
        return ret;
 }
 
-/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
-       int err;
-       u16 vid;
-
-       if (!vsi->netdev)
-               return -EINVAL;
-
-       err = ice_vsi_vlan_setup(vsi);
-       if (err)
-               return err;
-
-       for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
-               err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
-               if (err)
-                       break;
-       }
-
-       return err;
-}
-
 /**
  * ice_vsi_cfg - Setup the VSI
  * @vsi: the VSI being configured
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 
        if (vsi->netdev) {
                ice_set_rx_mode(vsi->netdev);
-               err = ice_restore_vlan(vsi);
+
+               err = ice_vsi_vlan_setup(vsi);
+
                if (err)
                        return err;
        }
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
        struct device *dev = &pf->pdev->dev;
        struct ice_hw *hw = &pf->hw;
        enum ice_status ret;
-       int err;
+       int err, i;
 
        if (test_bit(__ICE_DOWN, pf->state))
                goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
        }
 
        ice_reset_all_vfs(pf, true);
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               bool link_up;
+
+               if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+                       continue;
+               ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+               if (link_up) {
+                       netif_carrier_on(pf->vsi[i]->netdev);
+                       netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+               } else {
+                       netif_carrier_off(pf->vsi[i]->netdev);
+                       netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+               }
+       }
+
        /* if we get here, reset flow is successful */
        clear_bit(__ICE_RESET_FAILED, pf->state);
        return;
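A note on the new wait loop in ice_remove() above: with ICE_MAX_RESET_WAIT defined as 20 (see the ice.h hunk earlier) and msleep(100) per pass, an in-flight reset gets at most 20 x 100 ms = 2 s to finish before the driver sets __ICE_DOWN and proceeds with teardown regardless.
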
index 33403f39f1b3f8680dcf5b63c37956a5df2d0fad..40c9c65589568b34a1eb5ec50d842e41ddd030e3 100644 (file)
@@ -347,6 +347,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
        }
 }
 
+/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+       u16 i;
+
+       for (i = 0; i < ICE_MAX_VSI; i++)
+               ice_clear_vsi_ctx(hw, i);
+}
+
 /**
  * ice_add_vsi - add VSI context to the hardware and VSI handle list
  * @hw: pointer to the hw struct
index b88d96a1ef6935c2564e07e3443378b65f32b7ea..d5ef0bd58bf9789260bbf7575868f16a6123ec81 100644 (file)
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
               struct ice_sq_cd *cd);
 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
 
 /* Switch/bridge related commands */
index 5dae968d853e17b88344d3852c0b40a7ac133f66..fe5bbabbb41eacdac1bcd0466196ee835b35b97f 100644 (file)
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 
        /* update gso_segs and bytecount */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
-       first->bytecount = (first->gso_segs - 1) * off->header_len;
+       first->bytecount += (first->gso_segs - 1) * off->header_len;
 
        cd_tso_len = skb->len - off->header_len;
        cd_mss = skb_shinfo(skb)->gso_size;
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
- *     return (((size >> 12) * 85) >> 8) + 1;
+ *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
  *
  * Since multiplication and division are commutative, we can reorder
  * operations into:
- *     return ((size * 85) >> 20) + 1;
+ *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
  */
 static unsigned int ice_txd_use_count(unsigned int size)
 {
-       return ((size * 85) >> 20) + 1;
+       return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 }
 
 /**
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
-       if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+       if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+                             ICE_DESCS_FOR_CTX_DESC)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
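Worked numbers for ice_txd_use_count() above: the multiply-shift approximates division by the per-descriptor data cap (a 12 KiB-aligned limit, following the usual Intel scheme; the exact constant is an assumption here, it is not visible in the hunk), since 85 / 2^20 is just about 1 / 12336. For a 4 KiB fragment, (4096 * 85) >> 20 = 348160 >> 20 = 0, plus ICE_DESCS_FOR_SKB_DATA_PTR (1) gives 1 descriptor; for a 16 KiB fragment, (16384 * 85) >> 20 = 1, plus 1 gives 2.
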
index 1d0f58bd389bd35d9c5aad257e0d41c12c9ff1cd..75d0eaf6c9ddbe18a26c04d1b9edfae4322e16d7 100644 (file)
 #define ICE_RX_BUF_WRITE       16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG   128
 
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES           64
+#define ICE_DESCS_PER_CACHE_LINE       (ICE_CACHE_LINE_BYTES / \
+                                        sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC         1
+#define ICE_DESCS_FOR_SKB_DATA_PTR     1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+                    ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
 #define ICE_DESC_UNUSED(R)     \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
        (R)->next_to_clean - (R)->next_to_use - 1)
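Worked numbers for the macros above, assuming struct ice_tx_desc is the standard 16-byte descriptor (two 64-bit words) and MAX_SKB_FRAGS has its common 4 KiB-page value of 17; both are assumptions, neither is visible in the hunk. Then ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4, so DESC_NEEDED = 17 + 1 + 4 + 1 = 23, up from the old 17 + 4 = 21. In the ice_xmit_frame_ring() hunk earlier, count + ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_CTX_DESC evaluates to the same count + 5 as the old count + 4 + 1, so that change is self-documenting rather than behavioral.
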
index 12f9432abf11099f2c0eecb3bfe289ee801134f7..f4dbc81c198863b5037f6acc07ed053522923065 100644 (file)
@@ -92,12 +92,12 @@ struct ice_link_status {
        u64 phy_type_low;
        u16 max_frame_size;
        u16 link_speed;
+       u16 req_speeds;
        u8 lse_ena;     /* Link Status Event notification */
        u8 link_info;
        u8 an_info;
        u8 ext_info;
        u8 pacing;
-       u8 req_speeds;
        /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
         * ice_aqc_get_phy_caps structure
         */
index 45f10f8f01dc1ba0e39cd1e87ba16a11c5fbbe7f..e71065f9d3918a7623ac1bb517c14c7c9b632a39 100644 (file)
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
        struct ice_vsi_ctx ctxt = { 0 };
        enum ice_status status;
 
-       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
                               ICE_AQ_VSI_PVLAN_INSERT_PVID |
                               ICE_AQ_VSI_VLAN_EMOD_STR;
        ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 
                        if (!ice_vsi_add_vlan(vsi, vid)) {
                                vf->num_vlan++;
-                               set_bit(vid, vsi->active_vlans);
 
                                /* Enable VLAN pruning when VLAN 0 is added */
                                if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
                         */
                        if (!ice_vsi_kill_vlan(vsi, vid)) {
                                vf->num_vlan--;
-                               clear_bit(vid, vsi->active_vlans);
 
                                /* Disable VLAN pruning when removing VLAN 0 */
                                if (unlikely(!vid))
index 29ced6b74d364632113e9674f8d005257e548411..2b95dc9c7a6a8bd3fd0a2d03365e282b26382e70 100644 (file)
  *   2^40 * 10^-9 /  60  = 18.3 minutes.
  *
  * SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
  */
 
-#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 6)
 #define IGB_PTP_TX_TIMEOUT             (HZ * 15)
 #define INCPERIOD_82576                        BIT(E1000_TIMINCA_16NS_SHIFT)
 #define INCVALUE_82576_MASK            GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
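Worked numbers behind the shorter period: SYSTIM wraps after 2^40 ns, about 1099.5 s or 18.3 min, so the timecounter must be refreshed within half of that, about 9.16 min. With delayed work firing up to 10% late and the NIC clock running up to 6% fast, the old 8-minute period could stretch to roughly 8 * 1.1 * 1.06, about 9.3 min of NIC time, past the limit; 6 minutes stays near 6 * 1.1 * 1.06, about 7.0 min, comfortably inside it.
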
index 5bfd349bf41ac58ffaa003ecdca1d6493f4f42eb..3ba672e9e353d2cf6cd38ad64172e4219b9f5602 100644 (file)
@@ -494,7 +494,7 @@ struct mvneta_port {
 #if defined(__LITTLE_ENDIAN)
 struct mvneta_tx_desc {
        u32  command;           /* Options used by HW for packet transmitting.*/
-       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u16  reserved1;         /* csum_l4 (for future use)             */
        u16  data_size;         /* Data size of transmitted packet in bytes */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
 #else
 struct mvneta_tx_desc {
        u16  data_size;         /* Data size of transmitted packet in bytes */
-       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u16  reserved1;         /* csum_l4 (for future use)             */
        u32  command;           /* Options used by HW for packet transmitting.*/
        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
index 1857ee0f0871d48285a6d3711f7c3e9a1e08a05f..6f5153afcab4dfc331c099da854c54f1b9500887 100644 (file)
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->packets++;
        }
        ring->bytes += tx_info->nr_bytes;
-       netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
        if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_tx_stop_queue(ring->tx_queue);
                ring->queue_stopped++;
        }
-       send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+       send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+                                              tx_info->nr_bytes,
+                                              skb->xmit_more);
 
        real_size = (real_size / 16) & 0x3f;
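__netdev_tx_sent_queue() merges BQL byte accounting with the doorbell decision: it returns true when the driver must flush, either because xmit_more is clear or because BQL has just stopped the queue (in which case no later packet will arrive to ring the doorbell, the gap the old expression left open). Roughly the helper's shape in 4.20-era include/linux/netdevice.h; treat this as an approximation, not a verbatim copy:

static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);	/* account, no flush yet */
#endif
		return netif_tx_queue_stopped(dev_queue); /* flush if BQL stopped us */
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;	/* no xmit_more: always ring the doorbell */
}
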
 
index a2df12b79f8e915702323f9258972260135c094c..9bec940330a450856d2dba23ed7274321cf82059 100644 (file)
@@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
-                       is_bytes = true;
                        rate = 4 * 1024;
                        burst_size = 4;
                        break;
index cc1b373c0ace56e08564d3527de9f5da3f87b4e4..46dc93d3b9b53db6586b791bc6ffcf65b756daba 100644 (file)
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
                       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
                       fcoe_pf_params->num_cqs,
                       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto err;
        }
 
        p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 
        rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
        if (rc)
-               return rc;
+               goto err;
 
        cxt_info.iid = dummy_cid;
        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
        if (rc) {
                DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
                          dummy_cid);
-               return rc;
+               goto err;
        }
        p_cxt = cxt_info.p_cxt;
        SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
        return rc;
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+       return rc;
 }
 
 static int
index 1135387bd99d704f517679c4716760e39acce52c..4f8a685d1a55febcf78e3213a1c56130c8535213 100644 (file)
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
                       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
                       p_params->num_queues,
                       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }
 
index 82a1bd1f8a8ce3fd66acc6b0cc0c9e7bf6a57305..67c02ea939062dea70ae6e806546fd8266dd08cf 100644 (file)
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 
        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
-               /* Return spq entry which is taken in qed_sp_init_request()*/
-               qed_spq_return_entry(p_hwfn, p_ent);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
 
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        DP_NOTICE(p_hwfn,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
+                       qed_sp_destroy_request(p_hwfn, *pp_ent);
                        return -EINVAL;
                }
 
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
        } else {
                rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
                if (rc)
-                       return rc;
+                       goto err;
 
                if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
                        rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
                                             &abs_rx_q_id);
                        if (rc)
-                               return rc;
+                               goto err;
 
                        p_ramrod->rx_qid_valid = 1;
                        p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
                   (u64)p_params->addr, p_params->length);
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+       return rc;
 }
 
 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
index f40f654398a0782457240fa74bf81e0c65d7bf32..a96364df43203dbfe9b326a385a50b53dd1900c9 100644 (file)
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u32 *p_speed_mask)
 {
        u32 transceiver_type, transceiver_state;
+       int ret;
 
-       qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
-                                    &transceiver_type);
+       ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+                                          &transceiver_type);
+       if (ret)
+               return ret;
 
        if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
                                     false)
index c71391b9c757a1b03f55f21cc641c4718bbce719..62113438c8809c34e5c2dc48bccda67fc9ae21ad 100644 (file)
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
        default:
                rc = -EINVAL;
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
        SET_FIELD(p_ramrod->flags1,
index f9167d1354bbef3ccf2e972e8c002e64bbc24cce..e49fada854108718bf1dc5ea45fda2d4d264ded2 100644 (file)
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn,
                          "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
                          rc);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
 
index e95431f6acd46fb6ace4c20cfe227388c890cdea..3157c0d9944177e784a62ef983dd2adc3b5c0f11 100644 (file)
@@ -167,6 +167,9 @@ struct qed_spq_entry {
        enum spq_mode                   comp_mode;
        struct qed_spq_comp_cb          comp_cb;
        struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
+
+       /* Entry posted on behalf of this unlimited_pending entry (EBLOCK mode) */
+       struct qed_spq_entry            *post_ent;
 };
 
 struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
        struct qed_spq_comp_cb *p_comp_data;
 };
 
+/**
+ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+ *        Should be called in error flows after initializing the SPQ entry
+ *        and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+                           struct qed_spq_entry *p_ent);
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd,
index 77b6248ad3b97d3a45caf27825faddabf9695a5b..888274fa208bc768b2ab9db2514407573bfab2e1 100644 (file)
 #include "qed_sp.h"
 #include "qed_sriov.h"
 
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+                           struct qed_spq_entry *p_ent)
+{
+       /* qed_spq_get_entry() can either get an entry from the free_pool,
+        * or, if no entries are left, allocate a new entry and add it to
+        * the unlimited_pending list.
+        */
+       if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+               kfree(p_ent);
+       else
+               qed_spq_return_entry(p_hwfn, p_ent);
+}
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 
        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
-                       return -EINVAL;
+                       goto err;
 
                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
-               return -EINVAL;
+               goto err;
        }
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
 
        return 0;
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+
+       return -EINVAL;
 }
 
 static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
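The new qed_sp_destroy_request() above gives error paths a teardown that matches how the entry was obtained: pool entries go back via qed_spq_return_entry(), while overflow entries that were kmalloc'ed onto unlimited_pending are freed. The callers converted throughout this series follow one pattern; a hedged sketch, with fill_in_ramrod() as a hypothetical stand-in for the per-command setup:

	rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
	if (rc)
		return rc;

	rc = fill_in_ramrod(p_ent);	/* hypothetical setup step */
	if (rc) {
		/* initialized but not yet posted: release via the helper */
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
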
index c4a6274dd625c2bf419cc78dfab20f899ada494d..0a9c5bb0fa486658a23132680a1aeddb9a72b518 100644 (file)
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_ptt);
+       qed_ptt_release(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
-               goto out;
+               return 0;
 
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-       if (comp_done->done == 1)
+       if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
-out:
-       qed_ptt_release(p_hwfn, p_ptt);
-       return 0;
-
+               return 0;
+       }
 err:
-       qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                        /* EBLOCK responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);
+                       else
+                               p_ent->post_ent = p_en2;
 
                        p_ent = p_en2;
                }
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 }
 
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+       u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+
+       __set_bit(pos, p_spq->p_comp_bitmap);
+       while (test_bit(p_spq->comp_bitmap_idx,
+                       p_spq->p_comp_bitmap)) {
+               __clear_bit(p_spq->comp_bitmap_idx,
+                           p_spq->p_comp_bitmap);
+               p_spq->comp_bitmap_idx++;
+               qed_chain_return_produced(&p_spq->chain);
+       }
+}
+
 int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                                   p_ent->queue == &p_spq->unlimited_pending);
 
                if (p_ent->queue == &p_spq->unlimited_pending) {
-                       /* This is an allocated p_ent which does not need to
-                        * return to pool.
-                        */
+                       struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
                        kfree(p_ent);
-                       return rc;
+
+                       /* Return the entry which was actually posted */
+                       p_ent = p_post_ent;
                }
 
                if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
-       qed_chain_return_produced(&p_spq->chain);
+       qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 
 spq_post_fail:
        /* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
-                       u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
                        list_del(&p_ent->list);
-
-                       /* Avoid overriding of SPQ entries when getting
-                        * out-of-order completions, by marking the completions
-                        * in a bitmap and increasing the chain consumer only
-                        * for the first successive completed entries.
-                        */
-                       __set_bit(pos, p_spq->p_comp_bitmap);
-
-                       while (test_bit(p_spq->comp_bitmap_idx,
-                                       p_spq->p_comp_bitmap)) {
-                               __clear_bit(p_spq->comp_bitmap_idx,
-                                           p_spq->p_comp_bitmap);
-                               p_spq->comp_bitmap_idx++;
-                               qed_chain_return_produced(&p_spq->chain);
-                       }
-
+                       qed_spq_comp_bmap_update(p_hwfn, echo);
                        p_spq->comp_count++;
                        found = p_ent;
                        break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");
 
-       if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-           (found->queue == &p_spq->unlimited_pending))
+       if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
                /* EBLOCK  is responsible for returning its own entry into the
-                * free list, unless it originally added the entry into the
-                * unlimited pending list.
+                * free list.
                 */
                qed_spq_return_entry(p_hwfn, found);
 
index 9b08a9d9e15130f0518b1f7608bbaa36e6eb15b0..ca6290fa0f30940265ca1590de148c94eb2cf18e 100644 (file)
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }
 
index 9647578cbe6a8fec82409c4eadf9aee02f6c7971..14f26bf3b388bdce2913241d5790ec52617c249d 100644 (file)
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                         struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
                         struct qlcnic_host_tx_ring *tx_ring)
 {
-       u8 l4proto, opcode = 0, hdr_len = 0;
+       u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
        u16 flags = 0, vlan_tci = 0;
        int copied, offset, copy_len, size;
        struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                flags = QLCNIC_FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
+               tag_vlan = 1;
        } else if (skb_vlan_tag_present(skb)) {
                flags = QLCNIC_FLAGS_VLAN_OOB;
                vlan_tci = skb_vlan_tag_get(skb);
+               tag_vlan = 1;
        }
        if (unlikely(adapter->tx_pvid)) {
-               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+               if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
                        return -EIO;
-               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+               if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
                        goto set_flags;
 
                flags = QLCNIC_FLAGS_VLAN_OOB;
index 0afc3d335d562d24466b9192aea291b910ebcdfe..d11c16aeb19ad45759c44e1dac2bb259cf976054 100644 (file)
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
                      struct net_device *real_dev,
                      struct rmnet_endpoint *ep)
 {
-       struct rmnet_priv *priv;
+       struct rmnet_priv *priv = netdev_priv(rmnet_dev);
        int rc;
 
        if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
        rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        rmnet_dev->hw_features |= NETIF_F_SG;
 
+       priv->real_dev = real_dev;
+
        rc = register_netdevice(rmnet_dev);
        if (!rc) {
                ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 
                rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
 
-               priv = netdev_priv(rmnet_dev);
                priv->mux_id = id;
-               priv->real_dev = real_dev;
 
                netdev_dbg(rmnet_dev, "rmnet dev created\n");
        }
index b1b305f8f4143626fc664445182c5e2afc38b87c..272b9ca663148f36ccb7ae45363df773f2dd4c4c 100644 (file)
@@ -365,7 +365,8 @@ struct dma_features {
 
 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
 #define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
 #define BUF_SIZE_4KiB 4096
 #define BUF_SIZE_2KiB 2048
 
index ca9d7e48034ceb33f5f4eb4db5b99691ed1a278f..40d6356a7e73c213f0d1d073387b8605bb4f3726 100644 (file)
@@ -31,7 +31,7 @@
 /* Enhanced descriptors */
 static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
                        << ERDES1_BUFFER2_SIZE_SHIFT)
                   & ERDES1_BUFFER2_SIZE_MASK);
 
index 77914c89d7497de6f9a251196fe079f49364ee13..5ef91a790f9d16fbd122f71e130cf7ecf5249a68 100644 (file)
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                  int mode, int end)
 {
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
index abc3f85270cd0709e667a17112ff2d9d7f4ff8f4..d8c5bc4122195d73f7150f2775797cc6ba9a3393 100644 (file)
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
 static int set_16kib_bfsize(int mtu)
 {
        int ret = 0;
-       if (unlikely(mtu >= BUF_SIZE_8KiB))
+       if (unlikely(mtu > BUF_SIZE_8KiB))
                ret = BUF_SIZE_16KiB;
        return ret;
 }
index 3b7f10a5f06a660fbf6408f7adbcd0851ad79921..c5cae8e74dc40720eb2db3a3d91118a8ceece281 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /*     FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  *     Copyright (c) 2018  Maciej W. Rozycki
@@ -56,7 +56,7 @@
 #define DRV_VERSION "v.1.1.4"
 #define DRV_RELDATE "Oct  6 2018"
 
-static char version[] =
+static const char version[] =
        DRV_NAME ": " DRV_VERSION "  " DRV_RELDATE "  Maciej W. Rozycki\n";
 
 MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
 static void fza_tx_smt(struct net_device *dev)
 {
        struct fza_private *fp = netdev_priv(dev);
-       struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+       struct fza_buffer_tx __iomem *smt_tx_ptr;
        int i, len;
        u32 own;
 
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
 
                if (!netif_queue_stopped(dev)) {
                        if (dev_nit_active(dev)) {
+                               struct fza_buffer_tx *skb_data_ptr;
                                struct sk_buff *skb;
 
                                /* Length must be a multiple of 4 as only word
index b06acf32738ea5f32083d4403a3b3c95315396d0..93bda61be8e382646f3c9213e519690ef8cf3ab4 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*     FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  *     Copyright (c) 2018  Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
 #define FZA_RING_CMD           0x200400        /* command ring address */
 #define FZA_RING_CMD_SIZE      0x40            /* command descriptor ring
                                                 * size
+                                                */
 /* Command constants. */
 #define FZA_RING_CMD_MASK      0x7fffffff
 #define FZA_RING_CMD_NOP       0x00000000      /* nop */
index e86ea105c8022290bf1b02d781fe536d90d74d5a..70453701045371e9d2b64cd3f65e070c4b20faeb 100644 (file)
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
        return 0;
 }
 
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
 {
        int rc, val;
 
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
        ret = genphy_config_aneg(phydev);
 
        /* Then we can set up the delay. */
-       bcm5481x_config(phydev);
+       bcm54xx_config_clock_delay(phydev);
 
        if (of_property_read_bool(np, "enet-phy-lane-swap")) {
                /* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
        return ret;
 }
 
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Aneg first. */
+       ret = genphy_config_aneg(phydev);
+
+       /* Then we can set up the delay. */
+       bcm54xx_config_clock_delay(phydev);
+
+       return ret;
+}
+
 static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
 {
        int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .config_init    = bcm54xx_config_init,
+       .config_aneg    = bcm54616s_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
 }, {
index 7fc8508b5231d94beab4c45bf7666d15d4ef786f..271e8adc39f1005dcc48b678ef528d442f12b9f8 100644 (file)
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
                .flags          = PHY_HAS_INTERRUPT,
        }, {
                .phy_id         = 0x001cc816,
-               .name           = "RTL8201F 10/100Mbps Ethernet",
+               .name           = "RTL8201F Fast Ethernet",
                .phy_id_mask    = 0x001fffff,
                .features       = PHY_BASIC_FEATURES,
                .flags          = PHY_HAS_INTERRUPT,
index 262e7a3c23cb67fbfd66b81ed0d26af0f0480d84..f2d01cb6f958cd3235dba2383950dd5fa57d8c08 100644 (file)
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
        dev->net->flags |= IFF_MULTICAST;
        dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+       dev->net->min_mtu = ETH_MIN_MTU;
+       dev->net->max_mtu = ETH_DATA_LEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
        pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
                return ret;
        }
 
+       cancel_delayed_work_sync(&pdata->carrier_check);
+
        if (pdata->suspend_flags) {
                netdev_warn(dev->net, "error during last resume\n");
                pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
         */
        if (ret && PMSG_IS_AUTO(message))
                usbnet_resume(intf);
+
+       if (ret)
+               schedule_delayed_work(&pdata->carrier_check,
+                                     CARRIER_CHECK_DELAY);
+
        return ret;
 }
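The pairing above is the point of the smsc95xx change: carrier_check is delayed work that polls link state over the USB device, so it is cancelled before the device suspends and rescheduled only when the suspend attempt fails (alongside the existing usbnet_resume() fallback), keeping the work's device accesses aligned with the device's actual power state. That motivation is inferred from the hunks themselves rather than stated in them.
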
 
index 2e65be8b1387af92a2b099478c36e5a28e6d7ffc..559d567693b8d060b920952e8429d219471b9569 100644 (file)
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->ndev)
                nvme_nvm_update_nvm_info(ns);
 #ifdef CONFIG_NVME_MULTIPATH
-       if (ns->head->disk)
+       if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
+               blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+       }
 #endif
 }
 
index 5e3cc8c59a394fce6ba25f1c621b26903185963d..9901afd804ce3720709c198fb54140d2a2ea3d85 100644 (file)
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* set to a default value for 512 until disk is validated */
        blk_queue_logical_block_size(q, 512);
+       blk_set_stacking_limits(&q->limits);
 
        /* we need to propagate up the VMC settings */
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
index f4efe289dc7bc2caa8ce3c2a1a44b97e66cd0324..a5f9bbce863f42dcff6c23759801fa9c48d0b210 100644 (file)
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
        struct pci_dev *p2p_dev;
        int ret;
 
-       if (!ctrl->p2p_client)
+       if (!ctrl->p2p_client || !ns->use_p2pmem)
                return;
 
        if (ns->p2p_dev) {
index ddce100be57a48f883558e147669a3d06f1046bb..3f7971d3706d90d5fbf382072ed8d2da2ac6e8b6 100644 (file)
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
        int                     inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
        if (queue->host_qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_workqueue(nvmet_rdma_delete_wq);
+               flush_scheduled_work();
        }
 
        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
        if (ret) {
-               queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+               schedule_work(&queue->release_work);
                /* Destroying rdma_cm id is not needed here */
                return 0;
        }
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
        if (disconnect) {
                rdma_disconnect(queue->cm_id);
-               queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+               schedule_work(&queue->release_work);
        }
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
        pr_err("failed to connect queue %d\n", queue->idx);
-       queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+       schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
        if (ret)
                goto err_ib_client;
 
-       nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-                       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-       if (!nvmet_rdma_delete_wq) {
-               ret = -ENOMEM;
-               goto err_unreg_transport;
-       }
-
        return 0;
 
-err_unreg_transport:
-       nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
        ib_unregister_client(&nvmet_rdma_ib_client);
        return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-       destroy_workqueue(nvmet_rdma_delete_wq);
        nvmet_unregister_transport(&nvmet_rdma_ops);
        ib_unregister_client(&nvmet_rdma_ib_client);
        WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
index 0f27fad9fe940de645f61f868d0578795e073abc..5592437bb3d155aa415ee23511e404aa2afe9ba3 100644 (file)
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
         * set by the driver.
         */
        mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
-       dev->bus_dma_mask = mask;
        dev->coherent_dma_mask &= mask;
        *dev->dma_mask &= mask;
+       /* ...but only set bus mask if we found valid dma-ranges earlier */
+       if (!ret)
+               dev->bus_dma_mask = mask;
 
        coherent = of_dma_is_coherent(np);
        dev_dbg(dev, "device is%sdma coherent\n",
index 35c64a4295e07edc9b5f59ebaed18d3ab2ef1c52..fe6b13608e5101458254d4f522454df65dc2e8fa 100644 (file)
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
                distance = of_read_number(matrix, 1);
                matrix++;
 
+               if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+                   (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+                       pr_err("Invalid distance[node%d -> node%d] = %d\n",
+                              nodea, nodeb, distance);
+                       return -EINVAL;
+               }
+
                numa_set_distance(nodea, nodeb, distance);
-               pr_debug("distance[node%d -> node%d] = %d\n",
-                        nodea, nodeb, distance);
 
                /* Set default distance of node B->A same as A->B */
                if (nodeb > nodea)
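The check encodes the convention that a node is LOCAL_DISTANCE (10) from itself and strictly farther from everyone else. For a two-node numa-distance-map-v1 matrix, entries like <0 0 10>, <0 1 20>, <1 0 20>, <1 1 10> pass, while <0 0 20> (self-distance other than 10) or <0 1 10> (remote distance not above 10) now fail the parse with -EINVAL instead of being handed silently to numa_set_distance().
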
index 2a4aa64685794434f9ffdc5c38613035e63038f3..921db6f803403a27f7f4f8bb4394b9a94d95e0bd 100644 (file)
@@ -793,15 +793,10 @@ static void pci_acpi_setup(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);
-       int node;
 
        if (!adev)
                return;
 
-       node = acpi_get_node(adev->handle);
-       if (node != NUMA_NO_NODE)
-               set_dev_node(dev, node);
-
        pci_acpi_optimize_delay(pci_dev, adev->handle);
 
        pci_acpi_add_pm_notifier(adev, pci_dev);
index 4ceb06f8a33c965aa51cb317b56e8a672b55a090..4edeb4cae72aa28ba251558a499ce5a13436d237 100644 (file)
@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
 
 static struct meson_bank meson_gxbb_aobus_banks[] = {
        /*   name    first      last       irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  16, 0, 0,   0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
index 7dae1d7bf6b0a50f75c9d104f43f93972abaf27d..158f618f169570d07dcbd2b9850c72f7334ca495 100644 (file)
@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
 
 static struct meson_bank meson_gxl_aobus_banks[] = {
        /*   name    first      last      irq   pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 9, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 9, 0,  16, 0, 0,   0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
index f8b778a7d47174b902d398fba74ba1845d88b126..53d449076dee32bb64cf3f0093b4a7e9016b9fa7 100644 (file)
@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
                        dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
                        meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
-                       ret = regmap_update_bits(pc->reg_pull, reg,
+                       ret = regmap_update_bits(pc->reg_pullen, reg,
                                                 BIT(bit), 0);
                        if (ret)
                                return ret;
index c6d79315218fa69cadcdde9aa49d657d6916f5bb..86466173114da013ff7dff4e6a89195c9399a82d 100644 (file)
@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
 
 static struct meson_bank meson8_aobus_banks[] = {
        /*   name    first     last         irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16,  0,  0,  0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
index bb2a30964fc69a20bfe488a8904ebd6badaf8e71..647ad15d5c3c41ee538688ade979145efa459b9b 100644 (file)
@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
 
 static struct meson_bank meson8b_aobus_banks[] = {
        /*   name    first     lastc        irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  16, 0, 0,  0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
index e79f2a181ad24217a3e3bc232593184b82d494fd..b9ec4a16db1f6b6fd113c5661a28aa0e9153eeaa 100644 (file)
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
        tv64.tv_sec = rtc_tm_to_time64(&tm);
 
 #if BITS_PER_LONG == 32
-       if (tv64.tv_sec > INT_MAX)
+       if (tv64.tv_sec > INT_MAX) {
+               err = -ERANGE;
                goto err_read;
+       }
 #endif
 
        err = do_settimeofday64(&tv64);
index df0c5776d49bb6f5553dbbf2bc97b3235beae902..a5a19ff10535463d91d39d69ced1f13110ff139d 100644 (file)
@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        struct cmos_rtc *cmos = dev_get_drvdata(dev);
        unsigned char   rtc_control;
 
+       /* This is not only an rtc_op, but is also called directly */
        if (!is_valid_irq(cmos->irq))
                return -EIO;
 
@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        unsigned char mon, mday, hrs, min, sec, rtc_control;
        int ret;
 
+       /* This is not only an rtc_op, but is also called directly */
        if (!is_valid_irq(cmos->irq))
                return -EIO;
 
@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
        struct cmos_rtc *cmos = dev_get_drvdata(dev);
        unsigned long   flags;
 
-       if (!is_valid_irq(cmos->irq))
-               return -EINVAL;
-
        spin_lock_irqsave(&rtc_lock, flags);
 
        if (enabled)
@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
        .alarm_irq_enable       = cmos_alarm_irq_enable,
 };
 
+static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
+       .read_time              = cmos_read_time,
+       .set_time               = cmos_set_time,
+       .proc                   = cmos_procfs,
+};
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
                        dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
                        goto cleanup1;
                }
+
+               cmos_rtc.rtc->ops = &cmos_rtc_ops;
+       } else {
+               cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
        }
 
-       cmos_rtc.rtc->ops = &cmos_rtc_ops;
        cmos_rtc.rtc->nvram_old_abi = true;
        retval = rtc_register_device(cmos_rtc.rtc);
        if (retval)
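Net effect of the rtc-cmos hunks: when no valid alarm IRQ is available, the device registers cmos_rtc_ops_no_alarm, so the RTC core never reaches the alarm callbacks and cmos_alarm_irq_enable() can drop its is_valid_irq() guard. The guards stay (with the new comments) only in cmos_read_alarm() and cmos_set_alarm(), since those are also invoked directly rather than solely through rtc_class_ops.
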
index 9f99a0966550b5e77672e83cc707eda26b22ede5..7cb786d76e3c1da81bf5055bf01375546a74e9ff 100644 (file)
@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
        memcpy(buf + 1, val, val_size);
 
        ret = i2c_master_send(client, buf, val_size + 1);
+
+       kfree(buf);
+
        if (ret != val_size + 1)
                return ret < 0 ? ret : -EIO;
 
index 6843bc7ee9f24525789e3bd5100ee8278fd3e413..04e294d1d16d7ea68f8c9aea0d2f56bc5cb6ec11 100644 (file)
@@ -87,6 +87,18 @@ struct qeth_dbf_info {
 #define SENSE_RESETTING_EVENT_BYTE 1
 #define SENSE_RESETTING_EVENT_FLAG 0x80
 
+static inline u32 qeth_get_device_id(struct ccw_device *cdev)
+{
+       struct ccw_dev_id dev_id;
+       u32 id;
+
+       ccw_device_get_id(cdev, &dev_id);
+       id = dev_id.devno;
+       id |= (u32) (dev_id.ssid << 16);
+
+       return id;
+}
+
 /*
  * Common IO related definitions
  */
@@ -97,7 +109,8 @@ struct qeth_dbf_info {
 #define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
 #define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
 #define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
-#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+#define CCW_DEVID(cdev)                (qeth_get_device_id(cdev))
+#define CARD_DEVID(card)       (CCW_DEVID(CARD_RDEV(card)))
 
 /**
  * card stuff
@@ -830,6 +843,11 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+       return dev->netdev_ops != NULL;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
                                          unsigned int elements)
 {
@@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
 int qeth_do_run_thread(struct qeth_card *, unsigned long);
 void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
 void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *);
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
 void qeth_print_status_message(struct qeth_card *);
 int qeth_init_qdio_queues(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 void qeth_trace_features(struct qeth_card *);
 void qeth_close_dev(struct qeth_card *);
-int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
-                         long,
-                         int (*reply_cb)(struct qeth_card *,
-                                         struct qeth_reply *, unsigned long),
-                         void *);
 int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 enum qeth_ipa_funcs,
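A quick worked example of the new identifier: qeth_get_device_id() packs the subchannel-set id above the device number, so for ssid 1 and devno 0xd100, CCW_DEVID() yields 0x0001d100. That is the %x value the reworked QETH_DBF_MESSAGE calls in the next hunks print in place of the longer dev_name() strings.
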
index 3274f13aad57612967cbcfd832a2349572a0b122..4bce5ae65a55c193ec84c0d64137adde497daf6b 100644 (file)
@@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
                                return "OSD_1000";
                        case QETH_LINK_TYPE_10GBIT_ETH:
                                return "OSD_10GIG";
+                       case QETH_LINK_TYPE_25GBIT_ETH:
+                               return "OSD_25GIG";
                        case QETH_LINK_TYPE_LANE_ETH100:
                                return "OSD_FE_LANE";
                        case QETH_LINK_TYPE_LANE_TR:
@@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
        if (!iob) {
                dev_warn(&card->gdev->dev, "The qeth device driver "
                        "failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
-                       "available\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
+                                CARD_DEVID(card));
                return -ENOMEM;
        }
        qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
        rc = ccw_device_start(channel->ccwdev, channel->ccw,
                              (addr_t) iob, 0, 0);
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
-                       "rc=%i\n", dev_name(&card->gdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+                                rc, CARD_DEVID(card));
                atomic_set(&channel->irq_pending, 0);
                card->read_or_write_problem = 1;
                qeth_schedule_recovery(card);
@@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
        const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
+
        if (rc)
-               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
-                               "x%X \"%s\"\n",
-                               ipa_name, com, dev_name(&card->gdev->dev),
-                               QETH_CARD_IFNAME(card), rc,
-                               qeth_get_ipa_msg(rc));
+               QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+                                ipa_name, com, CARD_DEVID(card), rc,
+                                qeth_get_ipa_msg(rc));
        else
-               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
-                               ipa_name, com, dev_name(&card->gdev->dev),
-                               QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+                                ipa_name, com, CARD_DEVID(card));
 }
 
 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
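Editor's note: the reworked trace formats consistently use %#x (printed with a 0x prefix) for command and return codes, and bare %x for the device id. Illustration with made-up values:

        /* illustration only; the values are invented */
        pr_info("IPA: %s(%#x) for device %x returned %#x\n",
                "SETIP", 0xb1, 0x14711, 0xe008);
        /* -> "IPA: SETIP(0xb1) for device 14711 returned 0xe008" */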
@@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
 
        QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
        if ((buffer[2] & 0xc0) == 0xc0) {
-               QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+               QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
                                 buffer[4]);
                QETH_CARD_TEXT(card, 2, "ckidxres");
                QETH_CARD_TEXT(card, 2, " idxterm");
@@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
                QETH_CARD_TEXT(card, 2, "CGENCHK");
                dev_warn(&cdev->dev, "The qeth device driver "
                        "failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
-                       dev_name(&cdev->dev), dstat, cstat);
+               QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+                                CCW_DEVID(cdev), dstat, cstat);
                print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
                                16, 1, irb, 64, 1);
                return 1;
@@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
 
        switch (PTR_ERR(irb)) {
        case -EIO:
-               QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
-                       dev_name(&cdev->dev));
+               QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+                                CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
                break;
@@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
                }
                break;
        default:
-               QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
-                       dev_name(&cdev->dev), PTR_ERR(irb));
+               QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+                                PTR_ERR(irb), CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT(card, 2, "  rc???");
        }
@@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                        dev_warn(&channel->ccwdev->dev,
                                "The qeth device driver failed to recover "
                                "an error on the device\n");
-                       QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
-                               "0x%X dstat 0x%X\n",
-                               dev_name(&channel->ccwdev->dev), cstat, dstat);
+                       QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+                                        CCW_DEVID(channel->ccwdev), cstat,
+                                        dstat);
                        print_hex_dump(KERN_WARNING, "qeth: irb ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
                        print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
        if (channel->state != CH_STATE_ACTIVATING) {
                dev_warn(&channel->ccwdev->dev, "The qeth device driver"
                        " failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
-                       dev_name(&channel->ccwdev->dev));
+               QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
+                                CCW_DEVID(channel->ccwdev));
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
                return -ETIME;
        }
@@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
                                "The adapter is used exclusively by another "
                                "host\n");
                else
-                       QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
-                               " negative reply\n",
-                               dev_name(&channel->ccwdev->dev));
+                       QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+                                        CCW_DEVID(channel->ccwdev));
                goto out;
        }
        memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
        if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
-               QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
-                       "function level mismatch (sent: 0x%x, received: "
-                       "0x%x)\n", dev_name(&channel->ccwdev->dev),
-                       card->info.func_level, temp);
+               QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+                                CCW_DEVID(channel->ccwdev),
+                                card->info.func_level, temp);
                goto out;
        }
        channel->state = CH_STATE_UP;
@@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
                                "insufficient authorization\n");
                        break;
                default:
-                       QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
-                               " negative reply\n",
-                               dev_name(&channel->ccwdev->dev));
+                       QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+                                        CCW_DEVID(channel->ccwdev));
                }
                QETH_CARD_TEXT_(card, 2, "idxread%c",
                        QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
 
        memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
        if (temp != qeth_peer_func_level(card->info.func_level)) {
-               QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
-                       "level mismatch (sent: 0x%x, received: 0x%x)\n",
-                       dev_name(&channel->ccwdev->dev),
-                       card->info.func_level, temp);
+               QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+                                CCW_DEVID(channel->ccwdev),
+                                card->info.func_level, temp);
                goto out;
        }
        memcpy(&card->token.issuer_rm_r,
@@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                                      (addr_t) iob, 0, 0, event_timeout);
        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
-                       "ccw_device_start rc = %i\n",
-                       dev_name(&channel->ccwdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
+                                CARD_DEVID(card), rc);
                QETH_CARD_TEXT_(card, 2, " err%d", rc);
                spin_lock_irq(&card->lock);
                list_del_init(&reply->list);
@@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
        } else {
                dev_warn(&card->gdev->dev,
                         "The qeth driver ran out of channel command buffers\n");
-               QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
-                                dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers\n",
+                                CARD_DEVID(card));
        }
 
        return iob;
@@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
                return 0;
        default:
                if (cmd->hdr.return_code) {
-                       QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
-                                               "rc=%d\n",
-                                               dev_name(&card->gdev->dev),
-                                               cmd->hdr.return_code);
+                       QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+                                        CARD_DEVID(card),
+                                        cmd->hdr.return_code);
                        return 0;
                }
        }
@@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
                card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
                card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
        } else
-               QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
-                                       "\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
+                                CARD_DEVID(card));
        return 0;
 }
 
@@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                cmd->data.setadapterparms.hdr.return_code);
        if (cmd->data.setadapterparms.hdr.return_code !=
                                                SET_ACCESS_CTRL_RC_SUCCESS)
-               QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
-                               card->gdev->dev.kobj.name,
-                               access_ctrl_req->subcmd_code,
-                               cmd->data.setadapterparms.hdr.return_code);
+               QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+                                access_ctrl_req->subcmd_code, CARD_DEVID(card),
+                                cmd->data.setadapterparms.hdr.return_code);
        switch (cmd->data.setadapterparms.hdr.return_code) {
        case SET_ACCESS_CTRL_RC_SUCCESS:
                if (card->options.isolation == ISOLATION_MODE_NONE) {
@@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                }
                break;
        case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
-               QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
-                               "deactivated\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
+                                CARD_DEVID(card));
                if (fallback)
                        card->options.isolation = card->options.prev_isolation;
                break;
        case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
-               QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
-                               " activated\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
+                                CARD_DEVID(card));
                if (fallback)
                        card->options.isolation = card->options.prev_isolation;
                break;
@@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
                rc = qeth_setadpparms_set_access_ctrl(card,
                        card->options.isolation, fallback);
                if (rc) {
-                       QETH_DBF_MESSAGE(3,
-                               "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
-                               card->gdev->dev.kobj.name,
-                               rc);
+                       QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL,%d) on device %x: sending failed\n",
+                                        rc, CARD_DEVID(card));
                        rc = -EOPNOTSUPP;
                }
        } else if (card->options.isolation != ISOLATION_MODE_NONE) {
@@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
                rc = BMCR_FULLDPLX;
                if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
                    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
-                   (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+                   (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
+                   (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
                        rc |= BMCR_SPEED100;
                break;
        case MII_BMSR: /* Basic mode status register */
@@ -4634,8 +4626,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
        rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
                                    qeth_snmp_command_cb, (void *)&qinfo);
        if (rc)
-               QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
-                          QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
+                                CARD_DEVID(card), rc);
        else {
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
@@ -4869,8 +4861,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 
        rc = qeth_read_conf_data(card, (void **) &prcd, &length);
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-                       dev_name(&card->gdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
                goto out_offline;
        }
@@ -5086,7 +5078,7 @@ static struct ccw_driver qeth_ccw_driver = {
        .remove = ccwgroup_remove_ccwdev,
 };
 
-int qeth_core_hardsetup_card(struct qeth_card *card)
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 {
        int retries = 3;
        int rc;
@@ -5096,8 +5088,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
        qeth_update_from_chp_desc(card);
 retry:
        if (retries < 3)
-               QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
-                       dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+                                CARD_DEVID(card));
        rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -5161,13 +5153,20 @@ retriable:
                if (rc == IPA_RC_LAN_OFFLINE) {
                        dev_warn(&card->gdev->dev,
                                "The LAN is offline\n");
-                       netif_carrier_off(card->dev);
+                       *carrier_ok = false;
                } else {
                        rc = -ENODEV;
                        goto out;
                }
        } else {
-               netif_carrier_on(card->dev);
+               *carrier_ok = true;
+       }
+
+       if (qeth_netdev_is_registered(card->dev)) {
+               if (*carrier_ok)
+                       netif_carrier_on(card->dev);
+               else
+                       netif_carrier_off(card->dev);
        }
 
        card->options.ipa4.supported_funcs = 0;
@@ -5201,8 +5200,8 @@ retriable:
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
                "an error on the device\n");
-       QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
-               dev_name(&card->gdev->dev), rc);
+       QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
+                        CARD_DEVID(card), rc);
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
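Editor's note: qeth_core_hardsetup_card() no longer touches the carrier directly when the netdev has not been registered yet; it reports the link state through the new bool out-parameter, and the discipline applies it once register_netdev() has succeeded. The resulting caller flow, condensed from the l2/l3 hunks below:

        bool carrier_ok;

        rc = qeth_core_hardsetup_card(card, &carrier_ok);
        if (rc)
                goto out_remove;

        /* register_netdev() happens inside; carrier is set right after it */
        rc = qeth_l2_setup_netdev(card, carrier_ok);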
@@ -5481,11 +5480,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 }
 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
 
-int qeth_send_setassparms(struct qeth_card *card,
-                         struct qeth_cmd_buffer *iob, __u16 len, long data,
-                         int (*reply_cb)(struct qeth_card *,
-                                         struct qeth_reply *, unsigned long),
-                         void *reply_param)
+static int qeth_send_setassparms(struct qeth_card *card,
+                                struct qeth_cmd_buffer *iob, u16 len,
+                                long data, int (*reply_cb)(struct qeth_card *,
+                                                           struct qeth_reply *,
+                                                           unsigned long),
+                                void *reply_param)
 {
        int rc;
        struct qeth_ipa_cmd *cmd;
@@ -5501,7 +5501,6 @@ int qeth_send_setassparms(struct qeth_card *card,
        rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
 
 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
                                      enum qeth_ipa_funcs ipa_func,
@@ -6170,8 +6169,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
                WARN_ON_ONCE(1);
        }
 
-       /* fallthrough from high to low, to select all legal speeds: */
+       /* partially falls through, so the lower speeds get selected as well */
        switch (maxspeed) {
+       case SPEED_25000:
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    25000baseSR_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    25000baseSR_Full);
+               break;
        case SPEED_10000:
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     10000baseT_Full);
@@ -6254,6 +6259,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
                cmd->base.speed = SPEED_10000;
                cmd->base.port = PORT_FIBRE;
                break;
+       case QETH_LINK_TYPE_25GBIT_ETH:
+               cmd->base.speed = SPEED_25000;
+               cmd->base.port = PORT_FIBRE;
+               break;
        default:
                cmd->base.speed = SPEED_10;
                cmd->base.port = PORT_TP;
@@ -6320,6 +6329,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
        case CARD_INFO_PORTS_10G:
                cmd->base.speed = SPEED_10000;
                break;
+       case CARD_INFO_PORTS_25G:
+               cmd->base.speed = SPEED_25000;
+               break;
        }
 
        return 0;
index e85090467afe0a9e05b6d9b00355713ce53b2ef6..3e54be201b279f07b09481fd9c1c746802496c1e 100644 (file)
@@ -90,6 +90,7 @@ enum qeth_link_types {
        QETH_LINK_TYPE_GBIT_ETH     = 0x03,
        QETH_LINK_TYPE_OSN          = 0x04,
        QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
+       QETH_LINK_TYPE_25GBIT_ETH   = 0x12,
        QETH_LINK_TYPE_LANE_ETH100  = 0x81,
        QETH_LINK_TYPE_LANE_TR      = 0x82,
        QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
        CARD_INFO_PORTS_100M            = 0x00000006,
        CARD_INFO_PORTS_1G              = 0x00000007,
        CARD_INFO_PORTS_10G             = 0x00000008,
+       CARD_INFO_PORTS_25G             = 0x0000000A,
 };
 
 /* (SET)DELIP(M) IPA stuff ***************************************************/
@@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
                __u32 flags_32bit;
                struct qeth_ipa_caps caps;
                struct qeth_checksum_cmd chksum;
-               struct qeth_arp_cache_entry add_arp_entry;
+               struct qeth_arp_cache_entry arp_entry;
                struct qeth_arp_query_data query_arp;
                struct qeth_tso_start_data tso;
                __u8 ip[16];
index 23aaf373f631e2283e7c84ddc43f913d876f7058..2914a1a69f8300a36c1bf0580094532cb9ccecd4 100644 (file)
@@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
        QETH_CARD_TEXT(card, 2, "L2Wmac");
        rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc == -EEXIST)
-               QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
-                                mac, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
+                                CARD_DEVID(card));
        else if (rc)
-               QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
-                                mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
+                                CARD_DEVID(card), rc);
        return rc;
 }
 
@@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
        QETH_CARD_TEXT(card, 2, "L2Rmac");
        rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc)
-               QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
-                                mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %x: %d\n",
+                                CARD_DEVID(card), rc);
        return rc;
 }
 
@@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 2, "L2sdvcb");
        if (cmd->hdr.return_code) {
-               QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+               QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
                                 cmd->data.setdelvlan.vlan_id,
-                                QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+                                CARD_DEVID(card), cmd->hdr.return_code);
                QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
                QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
        }
@@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                rc = qeth_vm_request_mac(card);
                if (!rc)
                        goto out;
-               QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
-                                CARD_BUS_ID(card), rc);
+               QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
                /* fall back to alternative mechanism: */
        }
@@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                rc = qeth_setadpparms_change_macaddr(card);
                if (!rc)
                        goto out;
-               QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
-                                CARD_BUS_ID(card), rc);
+               QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
                /* fall back once more: */
        }
@@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l2_set_offline(cgdev);
-       unregister_netdev(card->dev);
+       if (qeth_netdev_is_registered(card->dev))
+               unregister_netdev(card->dev);
 }
 
 static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_set_features       = qeth_set_features
 };
 
-static int qeth_l2_setup_netdev(struct qeth_card *card)
+static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
 {
        int rc;
 
-       if (card->dev->netdev_ops)
+       if (qeth_netdev_is_registered(card->dev))
                return 0;
 
        card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        qeth_l2_request_initial_mac(card);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        rc = register_netdev(card->dev);
+       if (!rc && carrier_ok)
+               netif_carrier_on(card->dev);
+
        if (rc)
                card->dev->netdev_ops = NULL;
        return rc;
@@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc = 0;
        enum qeth_card_states recover_flag;
+       bool carrier_ok;
 
        mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
@@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
        recover_flag = card->state;
-       rc = qeth_core_hardsetup_card(card);
+       rc = qeth_core_hardsetup_card(card, &carrier_ok);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
@@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                dev_info(&card->gdev->dev,
                "The device represents a Bridge Capable Port\n");
 
-       rc = qeth_l2_setup_netdev(card);
+       rc = qeth_l2_setup_netdev(card, carrier_ok);
        if (rc)
                goto out_remove;
 
index 0b161cc1fd2e62f2251be71e939813ed58b5529c..f08b745c20073b92bd2a78da983ff2a9ade3ca77 100644 (file)
@@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
 
        QETH_CARD_TEXT(card, 4, "clearip");
 
-       if (recover && card->options.sniffer)
-               return;
-
        spin_lock_bh(&card->ip_lock);
 
        hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
                                  QETH_PROT_IPV4);
        if (rc) {
                card->options.route4.type = NO_ROUTER;
-               QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
-                       " on %s. Type set to 'no router'.\n", rc,
-                       QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+                                rc, CARD_DEVID(card));
        }
        return rc;
 }
@@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
                                  QETH_PROT_IPV6);
        if (rc) {
                card->options.route6.type = NO_ROUTER;
-               QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
-                       " on %s. Type set to 'no router'.\n", rc,
-                       QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+                                rc, CARD_DEVID(card));
        }
        return rc;
 }
@@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
        int rc = 0;
        int cnt = 3;
 
+       if (card->options.sniffer)
+               return 0;
 
        if (addr->proto == QETH_PROT_IPV4) {
                QETH_CARD_TEXT(card, 2, "setaddr4");
@@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
 {
        int rc = 0;
 
+       if (card->options.sniffer)
+               return 0;
+
        if (addr->proto == QETH_PROT_IPV4) {
                QETH_CARD_TEXT(card, 2, "deladdr4");
                QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
                }
                break;
        default:
-               QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
-                       cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
+                                cmd->data.diagass.action, CARD_DEVID(card));
        }
 
        return 0;
@@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
        qeth_l3_handle_promisc_mode(card);
 }
 
-static const char *qeth_l3_arp_get_error_cause(int *rc)
+static int qeth_l3_arp_makerc(int rc)
 {
-       switch (*rc) {
-       case QETH_IPA_ARP_RC_FAILED:
-               *rc = -EIO;
-               return "operation failed";
+       switch (rc) {
+       case IPA_RC_SUCCESS:
+               return 0;
        case QETH_IPA_ARP_RC_NOTSUPP:
-               *rc = -EOPNOTSUPP;
-               return "operation not supported";
-       case QETH_IPA_ARP_RC_OUT_OF_RANGE:
-               *rc = -EINVAL;
-               return "argument out of range";
        case QETH_IPA_ARP_RC_Q_NOTSUPP:
-               *rc = -EOPNOTSUPP;
-               return "query operation not supported";
+               return -EOPNOTSUPP;
+       case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+               return -EINVAL;
        case QETH_IPA_ARP_RC_Q_NO_DATA:
-               *rc = -ENOENT;
-               return "no query data available";
+               return -ENOENT;
        default:
-               return "unknown error";
+               return -EIO;
        }
 }
 
 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
 {
-       int tmp;
        int rc;
 
        QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
        rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
                                          IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
                                          no_entries);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
-                       "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
 {
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
-       int tmp;
        int rc;
 
        QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
        rc = qeth_l3_send_ipa_arp_cmd(card, iob,
                           QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
                           qeth_l3_arp_query_cb, (void *)qinfo);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2,
-                       "Error while querying ARP cache on %s: %s "
-                       "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
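Editor's note: qeth_l3_arp_makerc() replaces the old qeth_l3_arp_get_error_cause() scheme of rewriting rc in place while returning an error string; callers now log the raw IPA code and translate it to an errno in a single step. Condensed caller pattern (argument list shortened for illustration):

        rc = qeth_l3_send_ipa_arp_cmd(card, iob, len, cb, qinfo);
        if (rc)
                QETH_DBF_MESSAGE(2, "ARP cmd failed on device %x: %#x\n",
                                 CARD_DEVID(card), rc);

        /* e.g. QETH_IPA_ARP_RC_Q_NO_DATA -> -ENOENT, unknown codes -> -EIO */
        return qeth_l3_arp_makerc(rc);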
@@ -1793,15 +1777,18 @@ out:
        return rc;
 }
 
-static int qeth_l3_arp_add_entry(struct qeth_card *card,
-                               struct qeth_arp_cache_entry *entry)
+static int qeth_l3_arp_modify_entry(struct qeth_card *card,
+                                   struct qeth_arp_cache_entry *entry,
+                                   enum qeth_arp_process_subcmds arp_cmd)
 {
+       struct qeth_arp_cache_entry *cmd_entry;
        struct qeth_cmd_buffer *iob;
-       char buf[16];
-       int tmp;
        int rc;
 
-       QETH_CARD_TEXT(card, 3, "arpadent");
+       if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
+               QETH_CARD_TEXT(card, 3, "arpadd");
+       else
+               QETH_CARD_TEXT(card, 3, "arpdel");
 
        /*
         * currently GuestLAN only supports the ARP assist function
@@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                return -EOPNOTSUPP;
        }
 
-       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-                                      IPA_CMD_ASS_ARP_ADD_ENTRY,
-                                      sizeof(struct qeth_arp_cache_entry),
-                                      QETH_PROT_IPV4);
+       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
+                                      sizeof(*cmd_entry), QETH_PROT_IPV4);
        if (!iob)
                return -ENOMEM;
-       rc = qeth_send_setassparms(card, iob,
-                                  sizeof(struct qeth_arp_cache_entry),
-                                  (unsigned long) entry,
-                                  qeth_setassparms_cb, NULL);
-       if (rc) {
-               tmp = rc;
-               qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
-               QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
-                       "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
-}
-
-static int qeth_l3_arp_remove_entry(struct qeth_card *card,
-                               struct qeth_arp_cache_entry *entry)
-{
-       struct qeth_cmd_buffer *iob;
-       char buf[16] = {0, };
-       int tmp;
-       int rc;
 
-       QETH_CARD_TEXT(card, 3, "arprment");
+       cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
+       ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
+       memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
+       rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
+                                arp_cmd, CARD_DEVID(card), rc);
 
-       /*
-        * currently GuestLAN only supports the ARP assist function
-        * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
-        * thus we say EOPNOTSUPP for this ARP function
-        */
-       if (card->info.guestlan)
-               return -EOPNOTSUPP;
-       if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
-               return -EOPNOTSUPP;
-       }
-       memcpy(buf, entry, 12);
-       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-                                      IPA_CMD_ASS_ARP_REMOVE_ENTRY,
-                                      12,
-                                      QETH_PROT_IPV4);
-       if (!iob)
-               return -ENOMEM;
-       rc = qeth_send_setassparms(card, iob,
-                                  12, (unsigned long)buf,
-                                  qeth_setassparms_cb, NULL);
-       if (rc) {
-               tmp = rc;
-               memset(buf, 0, 16);
-               qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
-               QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
-                       " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_arp_flush_cache(struct qeth_card *card)
 {
        int rc;
-       int tmp;
 
        QETH_CARD_TEXT(card, 3, "arpflush");
 
@@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
        }
        rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
                                          IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
-                       "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_arp_cache_entry arp_entry;
+       enum qeth_arp_process_subcmds arp_cmd;
        int rc = 0;
 
        switch (cmd) {
@@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
                break;
        case SIOC_QETH_ARP_ADD_ENTRY:
-               if (!capable(CAP_NET_ADMIN)) {
-                       rc = -EPERM;
-                       break;
-               }
-               if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
-                                  sizeof(struct qeth_arp_cache_entry)))
-                       rc = -EFAULT;
-               else
-                       rc = qeth_l3_arp_add_entry(card, &arp_entry);
-               break;
        case SIOC_QETH_ARP_REMOVE_ENTRY:
-               if (!capable(CAP_NET_ADMIN)) {
-                       rc = -EPERM;
-                       break;
-               }
-               if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
-                                  sizeof(struct qeth_arp_cache_entry)))
-                       rc = -EFAULT;
-               else
-                       rc = qeth_l3_arp_remove_entry(card, &arp_entry);
-               break;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+               if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+                       return -EFAULT;
+
+               arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
+                               IPA_CMD_ASS_ARP_ADD_ENTRY :
+                               IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+               return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
        case SIOC_QETH_ARP_FLUSH_CACHE:
                if (!capable(CAP_NET_ADMIN)) {
                        rc = -EPERM;
@@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_neigh_setup        = qeth_l3_neigh_setup,
 };
 
-static int qeth_l3_setup_netdev(struct qeth_card *card)
+static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
 {
        unsigned int headroom;
        int rc;
 
-       if (card->dev->netdev_ops)
+       if (qeth_netdev_is_registered(card->dev))
                return 0;
 
        if (card->info.type == QETH_CARD_TYPE_OSD ||
@@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        rc = register_netdev(card->dev);
+       if (!rc && carrier_ok)
+               netif_carrier_on(card->dev);
+
 out:
        if (rc)
                card->dev->netdev_ops = NULL;
@@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l3_set_offline(cgdev);
 
-       unregister_netdev(card->dev);
+       if (qeth_netdev_is_registered(card->dev))
+               unregister_netdev(card->dev);
        qeth_l3_clear_ip_htable(card, 0);
        qeth_l3_clear_ipato_list(card);
 }
@@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc = 0;
        enum qeth_card_states recover_flag;
+       bool carrier_ok;
 
        mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
@@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
        recover_flag = card->state;
-       rc = qeth_core_hardsetup_card(card);
+       rc = qeth_core_hardsetup_card(card, &carrier_ok);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
 
-       rc = qeth_l3_setup_netdev(card);
+       rc = qeth_l3_setup_netdev(card, carrier_ok);
        if (rc)
                goto out_remove;
 
index f07444d30b216dace2eb7b7adcc59e818c95192e..640cd1b31a18d2bfc164e50adaf819ce27d6d754 100644 (file)
@@ -578,6 +578,7 @@ config SCSI_MYRB
 config SCSI_MYRS
        tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
        depends on PCI
+       depends on !CPU_BIG_ENDIAN || COMPILE_TEST
        select RAID_ATTRS
        help
          This driver adds support for the Mylex DAC960, AcceleRAID, and
index 8429c855701fca200b56df1453a4fc5188a9a38e..01c23d27f290b114c3f9e0b20b713edd30e2d884 100644 (file)
@@ -1198,7 +1198,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 
 out:
        if (!hostdata->selecting)
-               return NULL;
+               return false;
        hostdata->selecting = NULL;
        return ret;
 }
index f0e457e6884e5ca9c92d84c12c1db2cf6906104b..8df822a4a1bd6624abb8814a70e8bdc502274192 100644 (file)
@@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index cc36b6473e986b3191f88160ac41f24f730999d6..77a85ead483e098a8e1d837c8130bfb873dc3671 100644 (file)
@@ -1670,11 +1670,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index bd4ce38b98d229ad56fb4a224621929b34d11640..a369450a1fa7bfd71160c06211969609cc056f15 100644 (file)
@@ -886,11 +886,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index 0c8005bb0f53f271e958c4b1f51eadd7551474b8..34d311a7dbef1b5a0ad9e0cff3ef52d2f0e9103d 100644 (file)
@@ -698,6 +698,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                rport = lpfc_ndlp_get_nrport(ndlp);
                if (rport)
                        nrport = rport->remoteport;
+               else
+                       nrport = NULL;
                spin_unlock(&phba->hbalock);
                if (!nrport)
                        continue;
index aeb282f617c5c43fd182065e1fd9dbab7e6f69d5..0642f2d0a3bb687a1c7ca3de09e24865afa637a7 100644 (file)
@@ -1049,7 +1049,8 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
                enquiry2->fw.firmware_type = '0';
                enquiry2->fw.turn_id = 0;
        }
-       sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+       snprintf(cb->fw_version, sizeof(cb->fw_version),
+               "%d.%02d-%c-%02d",
                enquiry2->fw.major_version,
                enquiry2->fw.minor_version,
                enquiry2->fw.firmware_type,
index 0264a2e2bc190e068832a89452215919b9b22c30..b8d54ef8cf6dfb84c6d4cb3ce2714071cc2de1d6 100644 (file)
@@ -163,9 +163,12 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
        dma_addr_t ctlr_info_addr;
        union myrs_sgl *sgl;
        unsigned char status;
-       struct myrs_ctlr_info old;
+       unsigned short ldev_present, ldev_critical, ldev_offline;
+
+       ldev_present = cs->ctlr_info->ldev_present;
+       ldev_critical = cs->ctlr_info->ldev_critical;
+       ldev_offline = cs->ctlr_info->ldev_offline;
 
-       memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
        ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
                                        sizeof(struct myrs_ctlr_info),
                                        DMA_FROM_DEVICE);
@@ -198,9 +201,9 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
                    cs->ctlr_info->rbld_active +
                    cs->ctlr_info->exp_active != 0)
                        cs->needs_update = true;
-               if (cs->ctlr_info->ldev_present != old.ldev_present ||
-                   cs->ctlr_info->ldev_critical != old.ldev_critical ||
-                   cs->ctlr_info->ldev_offline != old.ldev_offline)
+               if (cs->ctlr_info->ldev_present != ldev_present ||
+                   cs->ctlr_info->ldev_critical != ldev_critical ||
+                   cs->ctlr_info->ldev_offline != ldev_offline)
                        shost_printk(KERN_INFO, cs->host,
                                     "Logical drive count changes (%d/%d/%d)\n",
                                     cs->ctlr_info->ldev_critical,
index 6fe20c27acc16e54aaf02112735d2b657c61852d..eb59c796a795de06af7d2fa19a64a30e022a3b87 100644 (file)
@@ -4763,6 +4763,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
+       fcport->fp_speed = PORT_SPEED_UNKNOWN;
 
        fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
index 518f15141170e733d8acee71cf288c31db6c9261..20c85eed1a7504997a9ab60d2c8247f04ab82995 100644 (file)
@@ -67,7 +67,7 @@ module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xplogiabsentdevice,
                "Option to enable PLOGI to devices that are not present after "
                "a Fabric scan.  This is needed for several broken switches. "
-               "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+               "Default is 0 - no PLOGI. 1 - perform PLOGI.");
 
 int ql2xloginretrycount = 0;
 module_param(ql2xloginretrycount, int, S_IRUGO);
index c7fccbb8f5545e463537389bc34b607de3dc9cb3..fa6e0c3b3aa678cd1e62f91021dc89036211aa8a 100644 (file)
@@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                 */
                scsi_mq_uninit_cmd(cmd);
 
+               /*
+                * The queue is still alive, so grab a reference to prevent it
+                * from being cleaned up while the queue is being run.
+                */
+               percpu_ref_get(&q->q_usage_counter);
+
                __blk_mq_end_request(req, error);
 
                if (scsi_target(sdev)->single_lun ||
@@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                        kblockd_schedule_work(&sdev->requeue_work);
                else
                        blk_mq_run_hw_queues(q, true);
+
+               percpu_ref_put(&q->q_usage_counter);
        } else {
                unsigned long flags;
 
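Editor's note: the added get/put pair pins the request queue across completion, because __blk_mq_end_request() may drop the last reference to the request and let queue cleanup race with the blk_mq_run_hw_queues() call that follows. The protective pattern in isolation (a sketch of the hunk above, not a complete function):

        percpu_ref_get(&q->q_usage_counter);    /* pin the queue */

        __blk_mq_end_request(req, error);       /* may release the last request */

        blk_mq_run_hw_queues(q, true);          /* q is still safe to touch here */

        percpu_ref_put(&q->q_usage_counter);    /* cleanup may proceed again */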
index 23d7cca36ff031b6463aae8a639b061bfd2c825c..27db55b0ca7f860787d34f35d30ec2919937e490 100644 (file)
@@ -8099,13 +8099,6 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
-
-       /*
-        * Do not use blk-mq at this time because blk-mq does not support
-        * runtime pm.
-        */
-       host->use_blk_mq = false;
-
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
index 73395a7536c5aecd8c62bc04634cb0afb4b418b6..fa933d4229518e230c09d7bed1935543b1b6de5b 100644 (file)
@@ -99,8 +99,6 @@ struct vbox_private {
        int fb_mtrr;
 
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
        } ttm;
 
index 5ecfa76291733a5e01aca0edab6b7f6a40acdb94..b36ec019c33274bf4c8511b024bfeddd7965c980 100644 (file)
@@ -35,61 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
        return container_of(bd, struct vbox_private, ttm.bdev);
 }
 
-static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-       return ttm_mem_global_init(ref->object);
-}
-
-static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-/**
- * Adds the vbox memory manager object/structures to the global memory manager.
- */
-static int vbox_ttm_global_init(struct vbox_private *vbox)
-{
-       struct drm_global_reference *global_ref;
-       int ret;
-
-       global_ref = &vbox->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &vbox_ttm_mem_global_init;
-       global_ref->release = &vbox_ttm_mem_global_release;
-       ret = drm_global_item_ref(global_ref);
-       if (ret) {
-               DRM_ERROR("Failed setting up TTM memory subsystem.\n");
-               return ret;
-       }
-
-       vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
-       global_ref = &vbox->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-
-       ret = drm_global_item_ref(global_ref);
-       if (ret) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&vbox->ttm.mem_global_ref);
-               return ret;
-       }
-
-       return 0;
-}
-
-/**
- * Removes the vbox memory manager object from the global memory manager.
- */
-static void vbox_ttm_global_release(struct vbox_private *vbox)
-{
-       drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&vbox->ttm.mem_global_ref);
-}
-
 static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
        struct vbox_bo *bo;
@@ -227,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
        struct drm_device *dev = &vbox->ddev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;
 
-       ret = vbox_ttm_global_init(vbox);
-       if (ret)
-               return ret;
-
        ret = ttm_bo_device_init(&vbox->ttm.bdev,
-                                vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET, true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
-               goto err_ttm_global_release;
+               return ret;
        }
 
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -260,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
 
 err_device_release:
        ttm_bo_device_release(&vbox->ttm.bdev);
-err_ttm_global_release:
-       vbox_ttm_global_release(vbox);
        return ret;
 }
 
@@ -275,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
        arch_phys_wc_del(vbox->fb_mtrr);
 #endif
        ttm_bo_device_release(&vbox->ttm.bdev);
-       vbox_ttm_global_release(vbox);
 }
 
 void vbox_ttm_placement(struct vbox_bo *bo, int domain)
index e31e4fc31aa150767c3fc910e6348b6f4a34b7fe..2cfd61d62e9730503414817d52c053c4083ba9e7 100644 (file)
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
 void transport_generic_request_failure(struct se_cmd *cmd,
                sense_reason_t sense_reason)
 {
-       int ret = 0;
+       int ret = 0, post_ret;
 
        pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                 sense_reason);
@@ -1790,7 +1790,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        transport_complete_task_attr(cmd);
 
        if (cmd->transport_complete_callback)
-               cmd->transport_complete_callback(cmd, false, NULL);
+               cmd->transport_complete_callback(cmd, false, &post_ret);
 
        if (transport_check_aborted_status(cmd, 1))
                return;
index ff6ba6d86cd8bf9ba43349fde40ec2d576ffed05..cc56cb3b3ecaa222222587da30460a58b2685bd7 100644 (file)
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
                hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                s->rx_timer.function = rx_timer_fn;
 
+               s->chan_rx_saved = s->chan_rx = chan;
+
                if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
                        sci_submit_rx(s);
-
-               s->chan_rx_saved = s->chan_rx = chan;
        }
 }
 
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
 static int sci_remove(struct platform_device *dev)
 {
        struct sci_port *port = platform_get_drvdata(dev);
+       unsigned int type = port->port.type;    /* uart_remove_... clears it */
 
        sci_ports_in_use &= ~BIT(port->port.line);
        uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
                sysfs_remove_file(&dev->dev.kobj,
                                  &dev_attr_rx_fifo_trigger.attr);
        }
-       if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
-           port->port.type == PORT_HSCIF) {
+       if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
                sysfs_remove_file(&dev->dev.kobj,
                                  &dev_attr_rx_fifo_timeout.attr);
        }
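
The sci_remove() change caches port->port.type in a local because uart_remove_one_port() clears the field before the sysfs cleanup needs it. A tiny standalone sketch of the same save-before-teardown pattern; the type constant and names are illustrative:

#include <stdio.h>

struct port { int type; };

#define PORT_SCIFA 42  /* illustrative constant, not the real value */

/* Stand-in for uart_remove_one_port(), which zeroes port->type. */
static void remove_port(struct port *p) { p->type = 0; }

static void driver_remove(struct port *p)
{
    int type = p->type;     /* cache it: remove_port() clears the field */

    remove_port(p);

    /* Post-teardown decisions must use the cached value. */
    if (type == PORT_SCIFA)
        puts("removing SCIFA-only sysfs attributes");
}

int main(void)
{
    struct port p = { .type = PORT_SCIFA };

    driver_remove(&p);
    return 0;
}
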
index 7576ceace57151a21007847f14dcf091005f09bb..f438eaa682463bffe42c27d923fe8426adef0926 100644 (file)
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 }
 EXPORT_SYMBOL(tty_termios_baud_rate);
 
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 #else  /* IBSHIFT */
        return tty_termios_baud_rate(termios);
 #endif /* IBSHIFT */
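
Both hunks guard the table lookup so that a computed cbaud at or past n_baud_table yields 0 rather than reading beyond baud_table[]. A minimal standalone illustration of the pattern, with an abbreviated stand-in table rather than the real baud_table:

#include <stdio.h>

/* Abbreviated illustrative table, not the kernel's baud_table[]. */
static const unsigned int baud_table[] = { 0, 50, 75, 110, 134, 150 };
static const int n_baud_table = sizeof(baud_table) / sizeof(baud_table[0]);

/* Out-of-range indexes now map to 0 instead of reading past the table. */
static unsigned int lookup_baud(int cbaud)
{
    return (cbaud < 0 || cbaud >= n_baud_table) ? 0 : baud_table[cbaud];
}

int main(void)
{
    printf("%u %u\n", lookup_baud(3), lookup_baud(99)); /* prints: 110 0 */
    return 0;
}
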
index 55370e651db31424db2330b80bd6bd4c4f9d02b8..41ec8e5010f30a544b82ca439cc5a481fe499b19 100644 (file)
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
        scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
        vc->vc_need_wrap = 0;
        if (con_should_update(vc))
-               do_update_region(vc, (unsigned long) start, count);
+               do_update_region(vc, (unsigned long)(start + offset), count);
 }
 
 static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
index e36d6c73c4a4184c6246b14ab27b8e0ecb393be8..78118883f96c8ffdf868a4474ba569be59857865 100644 (file)
@@ -23,6 +23,16 @@ config TYPEC_UCSI
 
 if TYPEC_UCSI
 
+config UCSI_CCG
+       tristate "UCSI Interface Driver for Cypress CCGx"
+       depends on I2C
+       help
+         This driver enables UCSI support on platforms that expose a
+         Cypress CCGx Type-C controller over an I2C interface.
+
+         To compile the driver as a module, choose M here: the module will be
+         called ucsi_ccg.
+
 config UCSI_ACPI
        tristate "UCSI ACPI Interface Driver"
        depends on ACPI
index 7afbea5122077b3dd0cbe217ad7c839837f499b4..2f4900b26210e245a65115ed1280bab41c19f62d 100644 (file)
@@ -8,3 +8,5 @@ typec_ucsi-y                    := ucsi.o
 typec_ucsi-$(CONFIG_TRACING)   += trace.o
 
 obj-$(CONFIG_UCSI_ACPI)                += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG)         += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644 (file)
index 0000000..de8a43b
--- /dev/null
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+       struct device *dev;
+       struct ucsi *ucsi;
+       struct ucsi_ppm ppm;
+       struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG                      0x06
+#define CCGX_RAB_UCSI_CONTROL                  0x39
+#define CCGX_RAB_UCSI_CONTROL_START            BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP             BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset)       (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+       struct i2c_client *client = uc->client;
+       const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+       unsigned char buf[2];
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = client->addr,
+                       .flags  = 0x0,
+                       .len    = sizeof(buf),
+                       .buf    = buf,
+               },
+               {
+                       .addr   = client->addr,
+                       .flags  = I2C_M_RD,
+                       .buf    = data,
+               },
+       };
+       u32 rlen, rem_len = len, max_read_len = len;
+       int status;
+
+       /* check for any max_read_len limitation on the i2c adapter */
+       if (quirks && quirks->max_read_len)
+               max_read_len = quirks->max_read_len;
+
+       while (rem_len > 0) {
+               msgs[1].buf = &data[len - rem_len];
+               rlen = min_t(u16, rem_len, max_read_len);
+               msgs[1].len = rlen;
+               put_unaligned_le16(rab, buf);
+               status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+               if (status < 0) {
+                       dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+                       return status;
+               }
+               rab += rlen;
+               rem_len -= rlen;
+       }
+
+       return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+       struct i2c_client *client = uc->client;
+       unsigned char *buf;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = client->addr,
+                       .flags  = 0x0,
+               }
+       };
+       int status;
+
+       buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       put_unaligned_le16(rab, buf);
+       memcpy(buf + sizeof(rab), data, len);
+
+       msgs[0].len = len + sizeof(rab);
+       msgs[0].buf = buf;
+
+       status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (status < 0) {
+               dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+               kfree(buf);
+               return status;
+       }
+
+       kfree(buf);
+       return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+       unsigned int count = 10;
+       u8 data;
+       int status;
+
+       data = CCGX_RAB_UCSI_CONTROL_STOP;
+       status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       data = CCGX_RAB_UCSI_CONTROL_START;
+       status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       /*
+        * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI
+        * control register write above pushes a response which must be
+        * cleared.
+        */
+       do {
+               status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+               if (status < 0)
+                       return status;
+
+               if (!data)
+                       return 0;
+
+               status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+               if (status < 0)
+                       return status;
+
+               usleep_range(10000, 11000);
+       } while (--count);
+
+       return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+       u8 *ppm = (u8 *)uc->ppm.data;
+       int status;
+       u16 rab;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+       status = ccg_write(uc, rab, ppm +
+                          offsetof(struct ucsi_data, message_out),
+                          sizeof(uc->ppm.data->message_out));
+       if (status < 0)
+               return status;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+       return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+                        sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+       u8 *ppm = (u8 *)uc->ppm.data;
+       int status;
+       u16 rab;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+       status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+                         sizeof(uc->ppm.data->cci));
+       if (status < 0)
+               return status;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+       return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+                       sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+       int status;
+       unsigned char data;
+
+       status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+       struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+       int status;
+
+       status = ucsi_ccg_recv_data(uc);
+       if (status < 0)
+               return status;
+
+       /* ack interrupt to allow next command to run */
+       return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+       struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+       ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+       return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+       struct ucsi_ccg *uc = data;
+
+       ucsi_notify(uc->ucsi);
+
+       return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct ucsi_ccg *uc;
+       int status;
+       u16 rab;
+
+       uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+       if (!uc)
+               return -ENOMEM;
+
+       uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+       if (!uc->ppm.data)
+               return -ENOMEM;
+
+       uc->ppm.cmd = ucsi_ccg_cmd;
+       uc->ppm.sync = ucsi_ccg_sync;
+       uc->dev = dev;
+       uc->client = client;
+
+       /* reset ccg device and initialize ucsi */
+       status = ucsi_ccg_init(uc);
+       if (status < 0) {
+               dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+               return status;
+       }
+
+       status = devm_request_threaded_irq(dev, client->irq, NULL,
+                                          ccg_irq_handler,
+                                          IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+                                          dev_name(dev), uc);
+       if (status < 0) {
+               dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+               return status;
+       }
+
+       uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+       if (IS_ERR(uc->ucsi)) {
+               dev_err(uc->dev, "ucsi_register_ppm failed\n");
+               return PTR_ERR(uc->ucsi);
+       }
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+       status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+                         offsetof(struct ucsi_data, version),
+                         sizeof(uc->ppm.data->version));
+       if (status < 0) {
+               ucsi_unregister_ppm(uc->ucsi);
+               return status;
+       }
+
+       i2c_set_clientdata(client, uc);
+       return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+       struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+       ucsi_unregister_ppm(uc->ucsi);
+
+       return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+       {"ccgx-ucsi", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+       .driver = {
+               .name = "ucsi_ccg",
+       },
+       .probe = ucsi_ccg_probe,
+       .remove = ucsi_ccg_remove,
+       .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
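
ccg_read() above splits large reads into chunks no bigger than the adapter's max_read_len quirk, re-encoding the register address block (RAB) before each transfer. A standalone userspace sketch of that loop shape, with i2c_transfer() replaced by an illustrative memcpy-based stand-in:

#include <string.h>

static unsigned char device[256];

/* Illustrative stand-in for one bounded i2c_transfer() read. */
static int transfer(unsigned int addr, unsigned char *dst, unsigned int len)
{
    memcpy(dst, device + addr, len);
    return 0;
}

/* Same loop shape as ccg_read(): clamp each transfer to max_read_len
 * and advance the register address block (rab) after every chunk. */
static int chunked_read(unsigned int rab, unsigned char *data,
                        unsigned int len, unsigned int max_read_len)
{
    unsigned int rem = len;

    while (rem > 0) {
        unsigned int rlen = rem < max_read_len ? rem : max_read_len;

        if (transfer(rab, data + (len - rem), rlen) < 0)
            return -1;
        rab += rlen;
        rem -= rlen;
    }
    return 0;
}

int main(void)
{
    unsigned char buf[64];

    memset(device, 0xab, sizeof(device));
    return chunked_read(0x10, buf, sizeof(buf), 16);
}
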
index f15f89df1f3653675da3b84b1c7ce47debd1bac6..7ea6fb6a2e5dd78c53a79bfea140e2e297858f7d 100644 (file)
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 
        ret = xenmem_reservation_increase(args->nr_pages, args->frames);
        if (ret != args->nr_pages) {
-               pr_debug("Failed to decrease reservation for DMA buffer\n");
+               pr_debug("Failed to increase reservation for DMA buffer\n");
                ret = -EFAULT;
        } else {
                ret = 0;
index df1ed37c3269ebd8170a21583676b80efe50a47f..de01a6d0059dc4adcb98a24197750f72b0b4ceaf 100644 (file)
 
 MODULE_LICENSE("GPL");
 
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
-                       "the privcmd-buf device per open file");
-
 struct privcmd_buf_private {
        struct mutex lock;
        struct list_head list;
-       unsigned int allocated;
 };
 
 struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
 {
        unsigned int i;
 
-       vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
        list_del(&vma_priv->list);
 
        for (i = 0; i < vma_priv->n_pages; i++)
-               if (vma_priv->pages[i])
-                       __free_page(vma_priv->pages[i]);
+               __free_page(vma_priv->pages[i]);
 
        kfree(vma_priv);
 }
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        unsigned int i;
        int ret = 0;
 
-       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
-           file_priv->allocated + count > limit)
+       if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
        vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        if (!vma_priv)
                return -ENOMEM;
 
-       vma_priv->n_pages = count;
-       count = 0;
-       for (i = 0; i < vma_priv->n_pages; i++) {
+       for (i = 0; i < count; i++) {
                vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!vma_priv->pages[i])
                        break;
-               count++;
+               vma_priv->n_pages++;
        }
 
        mutex_lock(&file_priv->lock);
 
-       file_priv->allocated += count;
-
        vma_priv->file_priv = file_priv;
        vma_priv->users = 1;
 
index 80953528572db52af07ceae9ec782b284ab29610..68f322f600a0677813867c5278b5aca9c23a6e8f 100644 (file)
@@ -3163,6 +3163,9 @@ void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
 int __init btrfs_init_cachep(void);
 void __cold btrfs_destroy_cachep(void);
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+                             struct btrfs_root *root, int *new,
+                             struct btrfs_path *path);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *was_new);
 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
index b0ab41da91d122b8cd8c8010cd56973af2afd240..3f0b6d1936e8ecd05dc0bf4957bb6aa691204dd4 100644 (file)
@@ -1664,9 +1664,8 @@ static int cleaner_kthread(void *arg)
        struct btrfs_root *root = arg;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int again;
-       struct btrfs_trans_handle *trans;
 
-       do {
+       while (1) {
                again = 0;
 
                /* Make the cleaner go to sleep early. */
@@ -1715,42 +1714,16 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
+               if (kthread_should_park())
+                       kthread_parkme();
+               if (kthread_should_stop())
+                       return 0;
                if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (!kthread_should_stop())
-                               schedule();
+                       schedule();
                        __set_current_state(TASK_RUNNING);
                }
-       } while (!kthread_should_stop());
-
-       /*
-        * Transaction kthread is stopped before us and wakes us up.
-        * However we might have started a new transaction and COWed some
-        * tree blocks when deleting unused block groups for example. So
-        * make sure we commit the transaction we started to have a clean
-        * shutdown when evicting the btree inode - if it has dirty pages
-        * when we do the final iput() on it, eviction will trigger a
-        * writeback for it which will fail with null pointer dereferences
-        * since work queues and other resources were already released and
-        * destroyed by the time the iput/eviction/writeback is made.
-        */
-       trans = btrfs_attach_transaction(root);
-       if (IS_ERR(trans)) {
-               if (PTR_ERR(trans) != -ENOENT)
-                       btrfs_err(fs_info,
-                                 "cleaner transaction attach returned %ld",
-                                 PTR_ERR(trans));
-       } else {
-               int ret;
-
-               ret = btrfs_commit_transaction(trans);
-               if (ret)
-                       btrfs_err(fs_info,
-                                 "cleaner open transaction commit returned %d",
-                                 ret);
        }
-
-       return 0;
 }
 
 static int transaction_kthread(void *arg)
@@ -3931,6 +3904,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
        int ret;
 
        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+       /*
+        * We don't want the cleaner to start new transactions, add more delayed
+        * iputs, etc. while we're closing. We can't use kthread_stop() yet
+        * because that frees the task_struct, and the transaction kthread might
+        * still try to wake up the cleaner.
+        */
+       kthread_park(fs_info->cleaner_kthread);
 
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3938,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
 
        if (!sb_rdonly(fs_info->sb)) {
                /*
-                * If the cleaner thread is stopped and there are
-                * block groups queued for removal, the deletion will be
-                * skipped when we quit the cleaner thread.
+                * The cleaner kthread is stopped, so do one final pass over
+                * unused block groups.
                 */
                btrfs_delete_unused_bgs(fs_info);
 
@@ -4359,13 +4338,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
        unpin = pinned_extents;
 again:
        while (1) {
+               /*
+                * btrfs_finish_extent_commit() may race with us and pick up
+                * the same range between find_first_extent_bit() and
+                * clear_extent_dirty(). Hence, hold the unused_bg_unpin_mutex
+                * to avoid unpinning the same extent range twice.
+                */
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY, NULL);
-               if (ret)
+               if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        break;
+               }
 
                clear_extent_dirty(unpin, start, end);
                btrfs_error_unpin_extent_range(fs_info, start, end);
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                cond_resched();
        }
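
The cleaner_kthread() rework moves the park and stop checks inside the loop so close_ctree() can park the cleaner before tearing anything down, then stop it later. A minimal standalone sketch of the resulting control flow, with the kthread_* primitives modeled by trivial stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int iter;

/* Trivial stand-ins for kthread_should_park()/parkme()/should_stop(). */
static bool should_park(void) { return iter == 2; }
static bool should_stop(void) { return iter >= 4; }
static void parkme(void)      { puts("parked until the closer unparks us"); }
static void do_cleaning(void) { printf("cleaning pass %d\n", iter); }

/* Patched shape: park/stop are checked inside the loop, so shutdown can
 * park the cleaner first and only stop it once it is safe to do so. */
static int cleaner(void)
{
    while (1) {
        do_cleaning();
        if (should_park())
            parkme();
        if (should_stop())
            return 0;
        iter++;
    }
}

int main(void) { return cleaner(); }
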
 
index 4ba0aedc878bd4422e67d47879b1de297ed848c8..74aa552f47930699aa2a81134c3200bf2f5d5c5e 100644 (file)
@@ -75,7 +75,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
         * sure NOFS is set to keep us from deadlocking.
         */
        nofs_flag = memalloc_nofs_save();
-       inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+       inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+       btrfs_release_path(path);
        memalloc_nofs_restore(nofs_flag);
        if (IS_ERR(inode))
                return inode;
@@ -838,6 +839,25 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
+       /*
+        * We must pass a path with search_commit_root set to btrfs_iget in
+        * order to avoid a deadlock when allocating extents for the tree root.
+        *
+        * When we are COWing an extent buffer from the tree root and looking
+        * for a free extent, at extent-tree.c:find_free_extent(), we can find
+        * a block group without its free space cache loaded. When that
+        * happens we must load its space cache, which requires reading its
+        * free space cache's inode item from the root tree. If this inode
+        * item is located in the same leaf that we started COWing before,
+        * then we end up deadlocking on the extent buffer (trying to read
+        * lock it when we previously write locked it).
+        *
+        * It's safe to read the inode item using the commit root because
+        * block groups, once loaded, stay in memory forever (until they are
+        * removed) as well as their space caches once loaded. New block groups
+        * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
+        * we will never try to read their inode item while the fs is mounted.
+        */
        inode = lookup_free_space_inode(fs_info, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
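
The deadlock described in the comment above reduces to one thread trying to read-lock an extent buffer it already write-locked. A miniature userspace illustration with a pthread rwlock, using trylock so the failure is visible instead of hanging:

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_rwlock_t leaf = PTHREAD_RWLOCK_INITIALIZER;

    /* COWing the leaf: we hold it for writing. */
    pthread_rwlock_wrlock(&leaf);

    /* Loading the space cache would need to read the same leaf. */
    if (pthread_rwlock_tryrdlock(&leaf) != 0)
        puts("same leaf already write-locked: blocking here would deadlock");
    else
        pthread_rwlock_unlock(&leaf);

    pthread_rwlock_unlock(&leaf);
    return 0;
}
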
index d3df5b52278cea06c05384ef7bf63aaed6b5404f..9ea4c6f0352f06e828a400890c50122c7ec33ee5 100644 (file)
@@ -1531,12 +1531,11 @@ out_check:
        }
        btrfs_release_path(path);
 
-       if (cur_offset <= end && cow_start == (u64)-1) {
+       if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
-               cur_offset = end;
-       }
 
        if (cow_start != (u64)-1) {
+               cur_offset = end;
                ret = cow_file_range(inode, locked_page, cow_start, end, end,
                                     page_started, nr_written, 1, NULL);
                if (ret)
@@ -3570,10 +3569,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
 /*
  * read an inode from the btree into the in-memory inode
  */
-static int btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode,
+                                  struct btrfs_path *in_path)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_path *path;
+       struct btrfs_path *path = in_path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3589,15 +3589,18 @@ static int btrfs_read_locked_inode(struct inode *inode)
        if (!ret)
                filled = true;
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       if (!path) {
+               path = btrfs_alloc_path();
+               if (!path)
+                       return -ENOMEM;
+       }
 
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
        if (ret) {
-               btrfs_free_path(path);
+               if (path != in_path)
+                       btrfs_free_path(path);
                return ret;
        }
 
@@ -3722,7 +3725,8 @@ cache_acl:
                                  btrfs_ino(BTRFS_I(inode)),
                                  root->root_key.objectid, ret);
        }
-       btrfs_free_path(path);
+       if (path != in_path)
+               btrfs_free_path(path);
 
        if (!maybe_acls)
                cache_no_acl(inode);
@@ -5644,8 +5648,9 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
 /* Get an inode object given its location and corresponding root.
  * Returns in *is_new if the inode was read from disk
  */
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-                        struct btrfs_root *root, int *new)
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+                             struct btrfs_root *root, int *new,
+                             struct btrfs_path *path)
 {
        struct inode *inode;
 
@@ -5656,7 +5661,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
        if (inode->i_state & I_NEW) {
                int ret;
 
-               ret = btrfs_read_locked_inode(inode);
+               ret = btrfs_read_locked_inode(inode, path);
                if (!ret) {
                        inode_tree_add(inode);
                        unlock_new_inode(inode);
@@ -5678,6 +5683,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
        return inode;
 }
 
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+                        struct btrfs_root *root, int *new)
+{
+       return btrfs_iget_path(s, location, root, new, NULL);
+}
+
 static struct inode *new_simple_dir(struct super_block *s,
                                    struct btrfs_key *key,
                                    struct btrfs_root *root)
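
btrfs_iget_path() lets a caller pass in a pre-configured path, and btrfs_read_locked_inode() frees a path only if it allocated one itself. A small standalone sketch of that optional-resource pattern; struct path here is a placeholder, not the kernel's btrfs_path:

#include <stdlib.h>

struct path { int slot; };  /* placeholder, not the kernel's btrfs_path */

/* Use the caller's path when one is supplied, otherwise allocate our
 * own -- and free only what we allocated ourselves. */
static int read_inode(struct path *in_path)
{
    struct path *path = in_path;

    if (!path) {
        path = calloc(1, sizeof(*path));
        if (!path)
            return -1;
    }

    /* ... perform the lookup through 'path' ... */

    if (path != in_path)
        free(path);
    return 0;
}

int main(void)
{
    struct path p = { 0 };

    read_inode(&p);          /* caller-owned: not freed by the helper */
    return read_inode(NULL); /* helper-owned: freed internally */
}
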
index 3ca6943827ef88e536b2d6c924e7664a3ab835e7..802a628e9f7d7fe629a76e8d108b75c04ed4246e 100644 (file)
@@ -3488,6 +3488,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
                        const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
 
                        len = round_down(i_size_read(src), sz) - loff;
+                       if (len == 0)
+                               return 0;
                        olen = len;
                }
        }
@@ -4257,9 +4259,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
-       /* if we extend to eof, continue to block boundary */
-       if (off + len == src->i_size)
+       /*
+        * If we extend to eof, continue to block boundary if and only if the
+        * destination end offset matches the destination file's size, otherwise
+        * we would be corrupting data by placing the eof block into the middle
+        * of a file.
+        */
+       if (off + len == src->i_size) {
+               if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+                       goto out_unlock;
                len = ALIGN(src->i_size, bs) - off;
+       }
 
        if (len == 0) {
                ret = 0;
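
To see what the new check rejects, assume a 4096-byte block size and made-up file sizes: extending an unaligned source tail to the block boundary is only safe when the clone lands at or beyond the destination's eof. A standalone sketch of the arithmetic:

#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
    unsigned long long bs = 4096, src_size = 10000, dst_size = 50000;
    unsigned long long off = 8192, len = src_size - off; /* 1808-byte tail */
    unsigned long long destoff = 20000;

    /* off + len == src_size: the clone covers the source eof block.
     * Rounding len up to the block boundary is only safe at or after
     * the destination's own eof. */
    if (off + len == src_size &&
        !IS_ALIGNED(len, bs) && destoff + len < dst_size)
        puts("reject: would place a partial eof block mid-file");
    return 0;
}
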
index b362b45dd7578ff2517baf86ac8779a211aba098..cbc9d0d2c12de42786da57c41ebbc9f0364ffabd 100644 (file)
@@ -1916,7 +1916,7 @@ restore:
 }
 
 /* Used to sort the devices by max_avail(descending sort) */
-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
                                       const void *dev_info2)
 {
        if (((struct btrfs_device_info *)dev_info1)->max_avail >
@@ -1945,8 +1945,8 @@ static inline void btrfs_descending_sort_devices(
  * The helper to calc the free space on the devices that can be used to store
  * file data.
  */
-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
-                                      u64 *free_bytes)
+static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+                                             u64 *free_bytes)
 {
        struct btrfs_device_info *devices_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
index cab0b1f1f741797c32b8d399194554642ecf8a64..efcf89a8ba44c3300f1a214f01ed935ce933c188 100644 (file)
@@ -440,7 +440,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
            type != (BTRFS_BLOCK_GROUP_METADATA |
                           BTRFS_BLOCK_GROUP_DATA)) {
                block_group_err(fs_info, leaf, slot,
-"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
                        type, hweight64(type),
                        BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
                        BTRFS_BLOCK_GROUP_SYSTEM,
index e07f3376b7dfc0c9350117bf3db956781e57d45e..a5ce99a6c936558f820d94a63d1301a111f8486b 100644 (file)
@@ -4396,6 +4396,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        logged_end = end;
 
        list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+               /*
+                * Skip extents outside our logging range. It's important to do
+                * it for correctness because if we don't ignore them, we may
+                * log them before their ordered extent completes, and therefore
+                * we could log them without logging their respective checksums
+                * (the checksum items are added to the csum tree at the very
+                * end of btrfs_finish_ordered_io()). Also leave such extents
+                * outside of our range in the list, since we may have another
+                * ranged fsync in the near future that needs them. If an extent
+                * outside our range corresponds to a hole, log it to avoid
+                * leaving gaps between extents (fsck will complain when we are
+                * not using the NO_HOLES feature).
+                */
+               if ((em->start > end || em->start + em->len <= start) &&
+                   em->block_start != EXTENT_MAP_HOLE)
+                       continue;
+
                list_del_init(&em->list);
                /*
                 * Just an arbitrary number, this can be really CPU intensive
index 27cad84dab23a9cf94008b3c76bff5440e7780ec..189df668b6a0cf0dc2d000184d170ce58d248076 100644 (file)
@@ -1931,10 +1931,17 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
        if (!prealloc_cf)
                return -ENOMEM;
 
-       /* Start by sync'ing the source file */
+       /* Start by sync'ing the source and destination files */
        ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
-       if (ret < 0)
+       if (ret < 0) {
+               dout("failed to write src file (%zd)\n", ret);
+               goto out;
+       }
+       ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
+       if (ret < 0) {
+               dout("failed to write dst file (%zd)\n", ret);
                goto out;
+       }
 
        /*
         * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
index 67a9aeb2f4ecdc66ea3cfd6131bf0e4082cb0691..bd13a3267ae03c401d7b0dd0c1f37626bbc42b0a 100644 (file)
@@ -80,12 +80,8 @@ static int parse_reply_info_in(void **p, void *end,
        info->symlink = *p;
        *p += info->symlink_len;
 
-       if (features & CEPH_FEATURE_DIRLAYOUTHASH)
-               ceph_decode_copy_safe(p, end, &info->dir_layout,
-                                     sizeof(info->dir_layout), bad);
-       else
-               memset(&info->dir_layout, 0, sizeof(info->dir_layout));
-
+       ceph_decode_copy_safe(p, end, &info->dir_layout,
+                             sizeof(info->dir_layout), bad);
        ceph_decode_32_safe(p, end, info->xattr_len, bad);
        ceph_decode_need(p, end, info->xattr_len, bad);
        info->xattr_data = *p;
@@ -3182,10 +3178,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        recon_state.pagelist = pagelist;
        if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
                recon_state.msg_version = 3;
-       else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
-               recon_state.msg_version = 2;
        else
-               recon_state.msg_version = 1;
+               recon_state.msg_version = 2;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;
index 32d4f13784ba5da85e420a565297eff6b3bf132a..03f4d24db8fe009dc4384b83162979c34f11d1e0 100644 (file)
@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
                ceph_put_snap_realm(mdsc, realm);
                realm = next;
        }
-       ceph_put_snap_realm(mdsc, realm);
+       if (realm)
+               ceph_put_snap_realm(mdsc, realm);
        up_read(&mdsc->snap_rwsem);
 
        return exceeded;
index 05f01fbd9c7fb868ecf5502cb6217e862461ef8e..22a9d81597206ce8c7aca27ab5df823c3493c991 100644 (file)
@@ -5835,9 +5835,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
 {
        int err = 0;
 
-       if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+       if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+               put_bh(iloc->bh);
                return -EIO;
-
+       }
        if (IS_I_VERSION(inode))
                inode_inc_iversion(inode);
 
index 17adcb16a9c85f8fee50f2796ed10c79f7e32cd7..437f71fe83ae557ad36af1092026aba6d58ccbc6 100644 (file)
@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
        if (!is_dx_block && type == INDEX) {
                ext4_error_inode(inode, func, line, block,
                       "directory leaf block found instead of index block");
+               brelse(bh);
                return ERR_PTR(-EFSCORRUPTED);
        }
        if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
                        list_del_init(&EXT4_I(inode)->i_orphan);
                        mutex_unlock(&sbi->s_orphan_lock);
                }
-       }
+       } else
+               brelse(iloc.bh);
+
        jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
        jbd_debug(4, "orphan inode %lu will point to %d\n",
                        inode->i_ino, NEXT_ORPHAN(inode));
index ebbc663d07985038ef17520fb41c5fae0e5d3637..a5efee34415fe5a6560522f5b3c48b08c3aac0d8 100644 (file)
@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
 
                BUFFER_TRACE(bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, bh);
-               if (err)
+               if (err) {
+                       brelse(bh);
                        return err;
+               }
                ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
                           first_cluster, first_cluster - start, count2);
                ext4_set_bits(bh->b_data, first_cluster - start, count2);
 
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (unlikely(err))
                        return err;
-               brelse(bh);
        }
 
        return 0;
@@ -605,7 +607,6 @@ handle_bb:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -618,9 +619,9 @@ handle_bb:
                ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
 
 handle_ib:
                if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -635,18 +636,16 @@ handle_ib:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
 
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
        }
-       bh = NULL;
 
        /* Mark group tables in block bitmap */
        for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -685,7 +684,6 @@ handle_ib:
        }
 
 out:
-       brelse(bh);
        err2 = ext4_journal_stop(handle);
        if (err2 && !err)
                err = err2;
@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
        if (unlikely(err)) {
                ext4_std_error(sb, err);
+               iloc.bh = NULL;
                goto exit_inode;
        }
        brelse(dind);
@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
                                     sizeof(struct buffer_head *),
                                     GFP_NOFS);
        if (!n_group_desc) {
+               brelse(gdb_bh);
                err = -ENOMEM;
                ext4_warning(sb, "not enough memory for %lu groups",
                             gdb_num + 1);
@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
        kvfree(o_group_desc);
        BUFFER_TRACE(gdb_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gdb_bh);
-       if (unlikely(err))
-               brelse(gdb_bh);
        return err;
 }
 
@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
                           backup_block, backup_block -
                           ext4_group_first_block_no(sb, group));
                BUFFER_TRACE(bh, "get_write_access");
-               if ((err = ext4_journal_get_write_access(handle, bh)))
+               if ((err = ext4_journal_get_write_access(handle, bh))) {
+                       brelse(bh);
                        break;
+               }
                lock_buffer(bh);
                memcpy(bh->b_data, data, size);
                if (rest)
@@ -2023,7 +2023,7 @@ retry:
 
        err = ext4_alloc_flex_bg_array(sb, n_group + 1);
        if (err)
-               return err;
+               goto out;
 
        err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
        if (err)
@@ -2059,6 +2059,10 @@ retry:
                n_blocks_count_retry = 0;
                free_flex_gd(flex_gd);
                flex_gd = NULL;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
                goto retry;
        }
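
The ext4 hunks above share one theme: every path that takes a buffer_head reference must drop it exactly once, and moving brelse() to just after the last use keeps the error paths from leaking. A standalone sketch of that discipline, with illustrative stand-ins for sb_bread()/brelse():

#include <stdlib.h>

struct buffer_head { int data; };

/* Illustrative stand-ins for sb_bread()/brelse()/journal access. */
static struct buffer_head *bread(void)
{
    return calloc(1, sizeof(struct buffer_head));
}
static void brelse(struct buffer_head *bh)     { free(bh); }
static int  journal_access(struct buffer_head *bh) { (void)bh; return -1; }

/* Release the reference right after the last use, before checking the
 * error, so success and failure paths drop it exactly once. */
static int mark_bitmap(void)
{
    struct buffer_head *bh = bread();
    int err;

    if (!bh)
        return -1;

    err = journal_access(bh);
    brelse(bh);
    return err;
}

int main(void) { return mark_bitmap() ? 1 : 0; }
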
 
index a221f1cdf70464db0d6551236eb5ecf0f2b76326..53ff6c2a26ed999e008f17c3b0170af9a05e158e 100644 (file)
@@ -4075,6 +4075,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_groups_count = blocks_count;
        sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
        if (ext4_has_feature_meta_bg(sb)) {
@@ -4094,14 +4102,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
-       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
-           le32_to_cpu(es->s_inodes_count)) {
-               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
-                        le32_to_cpu(es->s_inodes_count),
-                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
-               ret = -EINVAL;
-               goto failed_mount;
-       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
 
@@ -4510,6 +4510,7 @@ failed_mount6:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
 failed_mount5:
        ext4_ext_release(sb);
        ext4_release_system_zone(sb);
index f36fc5d5b257438666641028b8660c0ce3462e91..7643d52c776c61188ca6ac4a7ddd18d303c96e47 100644 (file)
@@ -1031,10 +1031,8 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
        inode_lock(ea_inode);
 
        ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
-       if (ret) {
-               iloc.bh = NULL;
+       if (ret)
                goto out;
-       }
 
        ref_count = ext4_xattr_inode_get_ref(ea_inode);
        ref_count += ref_change;
@@ -1080,12 +1078,10 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
        }
 
        ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
-       iloc.bh = NULL;
        if (ret)
                ext4_warning_inode(ea_inode,
                                   "ext4_mark_iloc_dirty() failed ret=%d", ret);
 out:
-       brelse(iloc.bh);
        inode_unlock(ea_inode);
        return ret;
 }
@@ -1388,6 +1384,12 @@ retry:
                bh = ext4_getblk(handle, ea_inode, block, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
+               if (!bh) {
+                       WARN_ON_ONCE(1);
+                       EXT4_ERROR_INODE(ea_inode,
+                                        "ext4_getblk() return bh = NULL");
+                       return -EFSCORRUPTED;
+               }
                ret = ext4_journal_get_write_access(handle, bh);
                if (ret)
                        goto out;
@@ -2276,8 +2278,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
        if (!bh)
                return ERR_PTR(-EIO);
        error = ext4_xattr_check_block(inode, bh);
-       if (error)
+       if (error) {
+               brelse(bh);
                return ERR_PTR(error);
+       }
        return bh;
 }
 
@@ -2397,6 +2401,8 @@ retry_inode:
                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
                } else if (error == -ENOSPC) {
                        if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+                               brelse(bs.bh);
+                               bs.bh = NULL;
                                error = ext4_xattr_block_find(inode, &i, &bs);
                                if (error)
                                        goto cleanup;
@@ -2617,6 +2623,8 @@ out:
        kfree(buffer);
        if (is)
                brelse(is->iloc.bh);
+       if (bs)
+               brelse(bs->bh);
        kfree(is);
        kfree(bs);
 
@@ -2696,7 +2704,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
                               struct ext4_inode *raw_inode, handle_t *handle)
 {
        struct ext4_xattr_ibody_header *header;
-       struct buffer_head *bh;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        static unsigned int mnt_count;
        size_t min_offs;
@@ -2737,13 +2744,17 @@ retry:
         * EA block can hold new_extra_isize bytes.
         */
        if (EXT4_I(inode)->i_file_acl) {
+               struct buffer_head *bh;
+
                bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
                error = -EIO;
                if (!bh)
                        goto cleanup;
                error = ext4_xattr_check_block(inode, bh);
-               if (error)
+               if (error) {
+                       brelse(bh);
                        goto cleanup;
+               }
                base = BHDR(bh);
                end = bh->b_data + bh->b_size;
                min_offs = end - base;
index ae813e609932168ec2e74056fb598aca0ad65907..a5e516a40e7a359cdae8b2bf289175e971b6e6c9 100644 (file)
@@ -165,9 +165,13 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 
 static void fuse_drop_waiting(struct fuse_conn *fc)
 {
-       if (fc->connected) {
-               atomic_dec(&fc->num_waiting);
-       } else if (atomic_dec_and_test(&fc->num_waiting)) {
+       /*
+        * Lockless check of fc->connected is okay, because
+        * atomic_dec_and_test() provides a memory barrier matched with the
+        * one in fuse_wait_aborted() to ensure no wake-up is missed.
+        */
+       if (atomic_dec_and_test(&fc->num_waiting) &&
+           !READ_ONCE(fc->connected)) {
                /* wake up aborters */
                wake_up_all(&fc->blocked_waitq);
        }
@@ -1768,8 +1772,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.args[1].size = total_len;
 
        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
-       if (err)
+       if (err) {
                fuse_retrieve_end(fc, req);
+               fuse_put_request(fc, req);
+       }
 
        return err;
 }
@@ -2219,6 +2225,8 @@ EXPORT_SYMBOL_GPL(fuse_abort_conn);
 
 void fuse_wait_aborted(struct fuse_conn *fc)
 {
+       /* matches implicit memory barrier in fuse_drop_waiting() */
+       smp_mb();
        wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
 }
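
The fuse change pairs the implicit full barrier of atomic_dec_and_test() with an explicit smp_mb() in fuse_wait_aborted() so the aborter cannot miss the final wake-up. A standalone C11-atomics sketch of the pairing; it runs single-threaded just to show the shape, and the kernel sleeps in wait_event() rather than spinning:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  num_waiting = 1;
static atomic_bool connected   = true;

/* Waiter side: the read-modify-write is a full barrier, so the load of
 * 'connected' cannot be reordered before the decrement. */
static void drop_waiting(void)
{
    if (atomic_fetch_sub(&num_waiting, 1) == 1 && !atomic_load(&connected))
        puts("last waiter gone: wake up aborters");
}

/* Aborter side: a full fence pairs with the decrement before the count
 * is (re)checked. */
static void wait_aborted(void)
{
    atomic_thread_fence(memory_order_seq_cst);
    while (atomic_load(&num_waiting) != 0)
        ;
}

int main(void)
{
    atomic_store(&connected, false);  /* abort the connection */
    drop_waiting();
    wait_aborted();
    return 0;
}
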
 
index cc2121b37bf5f7d3bb0a57398a2abfc6948ad321..b52f9baaa3e7b9c98478a8c115748ae71fb7b0e1 100644 (file)
@@ -2924,10 +2924,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
        }
 
        if (io->async) {
+               bool blocking = io->blocking;
+
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (!io->blocking)
+               if (!blocking)
                        return -EIOCBQUEUED;
 
                wait_for_completion(&wait);
index a683d9b27d76033a191b72f81528a7b255de4f08..9a4a15d646ebb2f556828c410cb38c0bd1f30dd5 100644 (file)
@@ -826,7 +826,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (ret)
                goto unlock;
-       iomap->private = dibh;
+       mp->mp_bh[0] = dibh;
 
        if (gfs2_is_stuffed(ip)) {
                if (flags & IOMAP_WRITE) {
@@ -863,9 +863,6 @@ unstuff:
        len = lblock_stop - lblock + 1;
        iomap->length = len << inode->i_blkbits;
 
-       get_bh(dibh);
-       mp->mp_bh[0] = dibh;
-
        height = ip->i_height;
        while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
                height++;
@@ -898,8 +895,6 @@ out:
        iomap->bdev = inode->i_sb->s_bdev;
 unlock:
        up_read(&ip->i_rw_mutex);
-       if (ret && dibh)
-               brelse(dibh);
        return ret;
 
 do_alloc:
@@ -980,9 +975,9 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                                  loff_t length, unsigned flags,
-                                 struct iomap *iomap)
+                                 struct iomap *iomap,
+                                 struct metapath *mp)
 {
-       struct metapath mp = { .mp_aheight = 1, };
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -996,9 +991,9 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
        unstuff = gfs2_is_stuffed(ip) &&
                  pos + length > gfs2_max_stuffed_size(ip);
 
-       ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+       ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
        if (ret)
-               goto out_release;
+               goto out_unlock;
 
        alloc_required = unstuff || iomap->type == IOMAP_HOLE;
 
@@ -1013,7 +1008,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 
                ret = gfs2_quota_lock_check(ip, &ap);
                if (ret)
-                       goto out_release;
+                       goto out_unlock;
 
                ret = gfs2_inplace_reserve(ip, &ap);
                if (ret)
@@ -1038,17 +1033,15 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                ret = gfs2_unstuff_dinode(ip, NULL);
                if (ret)
                        goto out_trans_end;
-               release_metapath(&mp);
-               brelse(iomap->private);
-               iomap->private = NULL;
+               release_metapath(mp);
                ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
-                                    flags, iomap, &mp);
+                                    flags, iomap, mp);
                if (ret)
                        goto out_trans_end;
        }
 
        if (iomap->type == IOMAP_HOLE) {
-               ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+               ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
                if (ret) {
                        gfs2_trans_end(sdp);
                        gfs2_inplace_release(ip);
@@ -1056,7 +1049,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                        goto out_qunlock;
                }
        }
-       release_metapath(&mp);
        if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
                iomap->page_done = gfs2_iomap_journaled_page_done;
        return 0;
@@ -1069,10 +1061,7 @@ out_trans_fail:
 out_qunlock:
        if (alloc_required)
                gfs2_quota_unlock(ip);
-out_release:
-       if (iomap->private)
-               brelse(iomap->private);
-       release_metapath(&mp);
+out_unlock:
        gfs2_write_unlock(inode);
        return ret;
 }
@@ -1088,10 +1077,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
 
        trace_gfs2_iomap_start(ip, pos, length, flags);
        if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
-               ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
+               ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
        } else {
                ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
-               release_metapath(&mp);
+
                /*
                 * Silently fall back to buffered I/O for stuffed files or if
                 * we've hit a hole (see gfs2_file_direct_write).
@@ -1100,6 +1089,11 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                    iomap->type != IOMAP_MAPPED)
                        ret = -ENOTBLK;
        }
+       if (!ret) {
+               get_bh(mp.mp_bh[0]);
+               iomap->private = mp.mp_bh[0];
+       }
+       release_metapath(&mp);
        trace_gfs2_iomap_end(ip, iomap, ret);
        return ret;
 }
@@ -1908,10 +1902,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
                        if (ret < 0)
                                goto out;
 
-                       /* issue read-ahead on metadata */
-                       if (mp.mp_aheight > 1) {
-                               for (; ret > 1; ret--) {
-                                       metapointer_range(&mp, mp.mp_aheight - ret,
+                       /* On the first pass, issue read-ahead on metadata. */
+                       if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
+                               unsigned int height = mp.mp_aheight - 1;
+
+                               /* No read-ahead for data blocks. */
+                               if (mp.mp_aheight - 1 == strip_h)
+                                       height--;
+
+                               for (; height >= mp.mp_aheight - ret; height--) {
+                                       metapointer_range(&mp, height,
                                                          start_list, start_aligned,
                                                          end_list, end_aligned,
                                                          &start, &end);
index ffe3032b1043deafc6730158f3d262d7a66279af..b08a530433adfb56560fedbf4d741205afdfbd41 100644 (file)
@@ -733,6 +733,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 
                if (gl) {
                        glock_clear_object(gl, rgd);
+                       gfs2_rgrp_brelse(rgd);
                        gfs2_glock_put(gl);
                }
 
@@ -1174,7 +1175,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
  * @rgd: the struct gfs2_rgrpd describing the RG to read in
  *
  * Read in all of a Resource Group's header and bitmap blocks.
- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
+ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
  *
  * Returns: errno
  */
index 9e198f00b64c6f59e7e4a50b1bc8ddf4ec73cdbf..35d2108d567c25d0de1376a84e8a2a4333859ddf 100644 (file)
@@ -730,8 +730,11 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
                return LRU_REMOVED;
        }
 
-       /* recently referenced inodes get one more pass */
-       if (inode->i_state & I_REFERENCED) {
+       /*
+        * Recently referenced inodes and inodes with many attached pages
+        * get one more pass.
+        */
+       if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
index 98d27da43304706f4c8dcc572a397d89ff34cef2..a7f91265ea671d0f6ebe59d2b9fb0f91bd6155cf 100644 (file)
@@ -695,9 +695,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
 
        hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
-                       /* might be worth a WARN_ON() */
-                       if (d_unlinked(dentry))
-                               return ERR_PTR(-ENOENT);
                        mp->m_count++;
                        return mp;
                }
@@ -711,6 +708,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry)
        int ret;
 
        if (d_mountpoint(dentry)) {
+               /* might be worth a WARN_ON() */
+               if (d_unlinked(dentry))
+                       return ERR_PTR(-ENOENT);
 mountpoint:
                read_seqlock_excl(&mount_lock);
                mp = lookup_mountpoint(dentry);
@@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags)
 
        namespace_lock();
        lock_mount_hash();
-       event++;
 
+       /* Recheck MNT_LOCKED with the locks held */
+       retval = -EINVAL;
+       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+               goto out;
+
+       event++;
        if (flags & MNT_DETACH) {
                if (!list_empty(&mnt->mnt_list))
                        umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags)
                        retval = 0;
                }
        }
+out:
        unlock_mount_hash();
        namespace_unlock();
        return retval;
@@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags)
                goto dput_and_out;
        if (!check_mnt(mnt))
                goto dput_and_out;
-       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+       if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
                goto dput_and_out;
        retval = -EPERM;
        if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                for (s = r; s; s = next_mnt(s, r)) {
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
-                               s = skip_mnt_tree(s);
-                               continue;
+                               if (s->mnt.mnt_flags & MNT_LOCKED) {
+                                       /* Both unbindable and locked. */
+                                       q = ERR_PTR(-EPERM);
+                                       goto out;
+                               } else {
+                                       s = skip_mnt_tree(s);
+                                       continue;
+                               }
                        }
                        if (!(flag & CL_COPY_MNT_NS_FILE) &&
                            is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        namespace_lock();
        lock_mount_hash();
-       umount_tree(real_mount(mnt), UMOUNT_SYNC);
+       umount_tree(real_mount(mnt), 0);
        unlock_mount_hash();
        namespace_unlock();
 }
index fa515d5ea5ba12e0e47fb0c6e3c67ea898bc0003..7b861bbc0b43f38285866dd0d0b2eb6afd45a9b8 100644 (file)
@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
 out_iput:
        rcu_read_unlock();
        trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
-       iput(inode);
+       nfs_iput_and_deactive(inode);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
        return res->status;
@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp,
        }
        trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
                        &args->stateid, -ntohl(res));
-       iput(inode);
+       nfs_iput_and_deactive(inode);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
        return res;
index 07b83956057627913ac64b44307d19a5765e03e4..6ec2f78c1e191ef3b7b3666fe458856d1fc547bc 100644 (file)
@@ -850,16 +850,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
 {
        struct nfs_delegation *delegation;
-       struct inode *res = NULL;
+       struct inode *freeme, *res = NULL;
 
        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
-                       res = igrab(delegation->inode);
+                       freeme = igrab(delegation->inode);
+                       if (freeme && nfs_sb_active(freeme->i_sb))
+                               res = freeme;
                        spin_unlock(&delegation->lock);
                        if (res != NULL)
                                return res;
+                       if (freeme) {
+                               rcu_read_unlock();
+                               iput(freeme);
+                               rcu_read_lock();
+                       }
                        return ERR_PTR(-EAGAIN);
                }
                spin_unlock(&delegation->lock);
index 62ae0fd345ad6751d5dbbf1ab8aefca85eff9e68..ffea5788539490467fcf83a4950a42ccce2e0ed3 100644 (file)
@@ -2601,11 +2601,12 @@ static void nfs4_state_manager(struct nfs_client *clp)
                nfs4_clear_state_manager_bit(clp);
                /* Did we race with an attempt to give us more work? */
                if (clp->cl_state == 0)
-                       break;
+                       return;
                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
-                       break;
-       } while (refcount_read(&clp->cl_count) > 1);
-       return;
+                       return;
+       } while (refcount_read(&clp->cl_count) > 1 && !signalled());
+       goto out_drain;
+
 out_error:
        if (strlen(section))
                section_sep = ": ";
@@ -2613,6 +2614,7 @@ out_error:
                        " with error %d\n", section_sep, section,
                        clp->cl_hostname, -status);
        ssleep(1);
+out_drain:
        nfs4_end_drain_session(clp);
        nfs4_clear_state_manager_bit(clp);
 }
index edff074d38c75c19a06a6ae5c634ba1fd1688d98..d505990dac7c9137b33120762b4606af655a0fc2 100644 (file)
@@ -1038,6 +1038,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
        __be32 status;
 
+       if (!cstate->save_fh.fh_dentry)
+               return nfserr_nofilehandle;
+
        status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
                                            src_stateid, RD_STATE, src, NULL);
        if (status) {
index 5769cf3ff035a4b500154eb4e1a4027d3245cbe3..e08a6647267b17d927641c2ef8f34de01c9bbcb3 100644 (file)
@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
                        continue;
                mark = iter_info->marks[type];
                /*
-                * if the event is for a child and this inode doesn't care about
-                * events on the child, don't send it!
+                * If the event is for a child and this mark doesn't care about
+                * events on a child, don't send it!
                 */
-               if (type == FSNOTIFY_OBJ_TYPE_INODE &&
-                   (event_mask & FS_EVENT_ON_CHILD) &&
-                   !(mark->mask & FS_EVENT_ON_CHILD))
+               if (event_mask & FS_EVENT_ON_CHILD &&
+                   (type != FSNOTIFY_OBJ_TYPE_INODE ||
+                    !(mark->mask & FS_EVENT_ON_CHILD)))
                        continue;
 
                marks_mask |= mark->mask;
index 2172ba516c61d536f5f05045e0a47d7dae30cfc1..d2c34900ae05da81e941b2a2d7503714ec09d8d0 100644 (file)
@@ -167,9 +167,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
        parent = dget_parent(dentry);
        p_inode = parent->d_inode;
 
-       if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+       if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
                __fsnotify_update_child_dentry_flags(p_inode);
-       else if (p_inode->i_fsnotify_mask & mask) {
+       } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
                struct name_snapshot name;
 
                /* we are notifying a parent so come up with the new mask which
@@ -339,6 +339,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
                sb = mnt->mnt.mnt_sb;
                mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
        }
+       /* An event "on child" is not intended for a mount/sb mark */
+       if (mask & FS_EVENT_ON_CHILD)
+               mnt_or_sb_mask = 0;
 
        /*
         * Optimization: srcu_read_lock() has a memory barrier which can
index da578ad4c08f4b5f5f66d3e7f3e5b5af77cff812..eb1ce30412dc3e09d1fbd4c8e890d86c2d0a4c9f 100644 (file)
@@ -2411,8 +2411,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
-       if (bytes > 0 && private)
-               ret = ocfs2_dio_end_io_write(inode, private, offset, bytes);
+       if (bytes <= 0)
+               mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
+                                (long long)bytes);
+       if (private) {
+               if (bytes > 0)
+                       ret = ocfs2_dio_end_io_write(inode, private, offset,
+                                                    bytes);
+               else
+                       ocfs2_dio_free_write_ctx(inode, private);
+       }
 
        ocfs2_iocb_clear_rw_locked(iocb);
 
index 308ea0eb35fd112f29a5546c23dba6f80e60c787..a396096a5099f93e95f43b2db71d4f64ce996448 100644 (file)
@@ -178,6 +178,15 @@ do {                                                                       \
                              ##__VA_ARGS__);                           \
 } while (0)
 
+#define mlog_ratelimited(mask, fmt, ...)                               \
+do {                                                                   \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+       if (__ratelimit(&_rs))                                          \
+               mlog(mask, fmt, ##__VA_ARGS__);                         \
+} while (0)
+
 #define mlog_errno(st) ({                                              \
        int _st = (st);                                                 \
        if (_st != -ERESTARTSYS && _st != -EINTR &&                     \
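Because the DEFINE_RATELIMIT_STATE above is static inside the macro body, every mlog_ratelimited() call site gets its own independent rate-limit state (by default at most 10 messages per 5-second interval), so one noisy path cannot suppress messages from another. A hypothetical caller, mirroring the aops.c hunk above:

	if (status < 0)
		mlog_ratelimited(ML_ERROR, "metadata read failed, rc = %d\n",
				 status);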
index 6fc5425b1474a52694b4860aa79cefc52d3fd826..2652d00842d6ba8c6479f816765c87dfc622d1cb 100644 (file)
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
        struct xfs_mount                *mp = bp->b_target->bt_mount;
        struct xfs_attr_leafblock       *leaf = bp->b_addr;
        struct xfs_attr_leaf_entry      *entries;
-       uint16_t                        end;
+       uint32_t                        end;    /* must be 32bit - see below */
        int                             i;
 
        xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
        /*
         * Quickly check the freemap information.  Attribute data has to be
         * aligned to 4-byte boundaries, and likewise for the free space.
+        *
+        * Note that for 64k block size filesystems, the freemap entries cannot
+        * overflow as they are only be16 fields. However, when checking the
+        * end pointer of the freemap, we have to be careful to detect
+        * overflows, and so we use uint32_t for those checks.
         */
        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
                if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
                        return __this_address;
                if (ichdr.freemap[i].size & 0x3)
                        return __this_address;
-               end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+               /* beware of 16-bit overflows here */
+               end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
                if (end < ichdr.freemap[i].base)
                        return __this_address;
                if (end > mp->m_attr_geo->blksize)
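A worked example with hypothetical freemap values shows what the widening fixes on a 64k (0x10000) attribute block: a valid entry may extend exactly to the end of the block, and the 16-bit sum then wraps to zero and trips the end < base corruption check on a perfectly good block:

	/* hypothetical freemap entry, blksize = 0x10000 */
	base = 0xF000; size = 0x1000;
	(uint16_t)(base + size) == 0x0000	/* wraps: end < base, spurious corruption */
	(uint32_t)base + size   == 0x10000	/* end == blksize: valid, accepted */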
index 6e2c08f30f602deb360e737003cc3ae1abf4bfc7..6ecdbb3af7de5c02c86a25d41ed7086ae2f845fc 100644 (file)
@@ -1608,7 +1608,7 @@ xfs_ioc_getbmap(
        error = 0;
 out_free_buf:
        kmem_free(buf);
-       return 0;
+       return error;
 }
 
 struct getfsmap_info {
index 576c375ce12a8f411a49f75cf6bd72f69c96279c..6b736ea58d35402eb7e7975067a4303131cf3d83 100644 (file)
@@ -107,5 +107,5 @@ assfail(char *expr, char *file, int line)
 void
 xfs_hex_dump(void *p, int length)
 {
-       print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+       print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
 }
index 89f3b03b14451af9f4a9707172a97143f2ae9b5e..e3667c9a33a5deea5ef1f849b97cb0c3fe83a404 100644 (file)
@@ -3,7 +3,7 @@
 #define _4LEVEL_FIXUP_H
 
 #define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 #define PUD_SHIFT                      PGDIR_SHIFT
 #define PUD_SIZE                       PGDIR_SIZE
index 9c2e0708eb82f4aeb8c009f51f78fc43ae511036..73474bb52344d982abaee00ffcbad322308e06f4 100644 (file)
@@ -3,7 +3,7 @@
 #define _5LEVEL_FIXUP_H
 
 #define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 #define P4D_SHIFT                      PGDIR_SHIFT
 #define P4D_SIZE                       PGDIR_SIZE
index 0c34215263b8aec624451b3f04575a1ea6328cf7..1d6dd38c0e5ea8a2155c370cf27bb808f252031e 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLY__
 #include <asm-generic/5level-fixup.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
index 1a29b2a0282bf20a8541b79096b474d16c5ae50c..04cb913797bc0d534032364c05d53c50d8d7d73f 100644 (file)
@@ -4,7 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 typedef struct { pgd_t pgd; } p4d_t;
 
index f35f6e8149e47dca34e7cded26574b0786322fc1..b85b8271a73debc1dc58f661ba9073c399fab175 100644 (file)
@@ -8,7 +8,7 @@
 
 struct mm_struct;
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
index e950b9c50f34f218284ff0785366c9ef07a6bdf9..9bef475db6fefe1e3b79c04cff754efd1b383de0 100644 (file)
@@ -9,7 +9,7 @@
 #else
 #include <asm-generic/pgtable-nop4d.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a p4d gets the size right, and allows
index 5657a20e0c599449d9851e08f672b2c2cb7f6d64..359fb935ded6ab0cc418659f6f119d26886db129 100644 (file)
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
 #endif
 #endif
 
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)      __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)      __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)      __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
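These defaults also explain the string of one-line changes earlier in this merge that turn a bare '#define __PAGETABLE_*_FOLDED' into '#define __PAGETABLE_*_FOLDED 1': __is_defined() only evaluates to 1 when the macro expands to the literal 1, while an architecture such as s390, where the fold depends on the mm, can override mm_*_folded(mm) with a runtime test. A condensed sketch of the __is_defined() trick (see include/linux/kconfig.h):

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val

/* FOO defined as 1: the paste yields "0," and the second arg is 1;
 * FOO undefined or defined to nothing: no placeholder matches, result is 0.
 */
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)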
index 514beb2d483aa34ed00a96fd95b2d99f5aeeeb29..bdb0d5548f39ede087a64e626f34b215834733d5 100644 (file)
@@ -68,7 +68,6 @@
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_os_linux.h>
index 2f38d3598eb498ee99245ba7e569d5e5ccc46448..665b9cae7f43c88ed6444f86467d05fab0781a94 100644 (file)
@@ -508,6 +508,18 @@ struct drm_connector_state {
         * drm_writeback_signal_completion()
         */
        struct drm_writeback_job *writeback_job;
+
+       /**
+        * @max_requested_bpc: Connector property to limit the maximum bit
+        * depth of the pixels.
+        */
+       u8 max_requested_bpc;
+
+       /**
+        * @max_bpc: Connector max_bpc based on the requested max_bpc property
+        * and the connector bpc limitations obtained from edid.
+        */
+       u8 max_bpc;
 };
 
 /**
@@ -973,6 +985,12 @@ struct drm_connector {
         */
        struct drm_property_blob *path_blob_ptr;
 
+       /**
+        * @max_bpc_property: Default connector property for the max bpc to be
+        * driven out of the connector.
+        */
+       struct drm_property *max_bpc_property;
+
 #define DRM_CONNECTOR_POLL_HPD (1 << 0)
 #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
 #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1245,6 +1263,8 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
                                            uint64_t link_status);
 int drm_connector_init_panel_orientation_property(
        struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+                                         int min, int max);
 
 /**
  * struct drm_tile_group - Tile group metadata
index 9ad98e8d9ede0745a67737f19aaaa11c4774fee1..3314e91f6eb318b32f2dc7981b95bd1b4f4c17e4 100644 (file)
 #define DP_DSC_MAX_BITS_PER_PIXEL_LOW       0x067   /* eDP 1.4 */
 
 #define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
 
 #define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
 # define DP_DSC_RGB                         (1 << 0)
 # define DP_DSC_THROUGHPUT_MODE_1_1000      (14 << 4)
 
 #define DP_DSC_MAX_SLICE_WIDTH              0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE        2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER       320
 
 #define DP_DSC_SLICE_CAP_2                  0x06D
 # define DP_DSC_16_PER_DP_DSC_SINK          (1 << 0)
 # define DP_AUX_FRAME_SYNC_VALID           (1 << 0)
 
 #define DP_DSC_ENABLE                       0x160   /* DP 1.4 */
+# define DP_DECOMPRESSION_EN                (1 << 0)
 
 #define DP_PSR_EN_CFG                      0x170   /* XXX 1.2? */
 # define DP_PSR_ENABLE                     (1 << 0)
 #define DP_AUX_HDCP_KSV_FIFO           0x6802C
 #define DP_AUX_HDCP_AINFO              0x6803B
 
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET             0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET          0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET         0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET             0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET         0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET                0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET       0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET               0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET          0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET       0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET              0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET          0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET                0x69318
+#define        DP_HDCP_2_2_REG_RIV_OFFSET              0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET          0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET       0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET          0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET    0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET               0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET       0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET               0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET  0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET          0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET                0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET     0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET             0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET            DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET       DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET    DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET       DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET     DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+                                               DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET             DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET      DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET                DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET        DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET                DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET   DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET    DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN               1
+#define HDCP_2_2_DP_RXSTATUS_READY(x)          ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x)                ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x)                ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x)     ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x)    ((x) & BIT(4))
+
 /* DP 1.2 Sideband message defines */
 /* peer device type - DP 1.2a Table 2-92 */
 #define DP_PEER_DEVICE_NONE            0x0
@@ -965,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
 
 #define DP_BRANCH_OUI_HEADER_SIZE      0xc
 #define DP_RECEIVER_CAP_SIZE           0xf
+#define DP_DSC_RECEIVER_CAP_SIZE        0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE      2
 #define EDP_DISPLAY_CTL_CAP_SIZE       3
 
@@ -995,6 +1052,7 @@ struct dp_sdp_header {
 
 #define EDP_SDP_HEADER_REVISION_MASK           0x1F
 #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES     0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
 
 struct edp_vsc_psr {
        struct dp_sdp_header sdp_header;
@@ -1061,6 +1119,43 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
        return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
 }
 
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+                                  bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+       return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+               DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+       return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+               ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+                 DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) <<
+                DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+       /* Max Slicewidth = Number of Pixels * 320 */
+       return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+               DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+       return fec_capable & DP_FEC_CAPABLE;
+}
+
 /*
  * DisplayPort AUX channel
  */
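All of the new DSC helpers take the raw DP_DSC_RECEIVER_CAP_SIZE-byte capability blob read from DPCD address DP_DSC_SUPPORT (0x060) onward. A minimal probe-time sketch; the aux channel pointer and error handling are assumed to come from the surrounding driver:

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) == sizeof(dsc_dpcd) &&
	    drm_dp_sink_supports_dsc(dsc_dpcd)) {
		u8 max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
		u32 max_width = drm_dp_dsc_sink_max_slice_width(dsc_dpcd);

		/* ... choose a slice configuration for the mode ... */
	}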
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644 (file)
index 3a83060..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
-       DRM_GLOBAL_TTM_MEM = 0,
-       DRM_GLOBAL_TTM_BO,
-       DRM_GLOBAL_TTM_OBJECT,
-       DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
-       enum drm_global_types global_type;
-       size_t size;
-       void *object;
-       int (*init) (struct drm_global_reference *);
-       void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
index 98e63d870139db0c9956f4446d3e298eb2964e7a..a6de09c5e47f500b1c93b5302fc3a00fef267988 100644 (file)
 #define DRM_HDCP_DDC_BSTATUS                   0x41
 #define DRM_HDCP_DDC_KSV_FIFO                  0x43
 
+#define DRM_HDCP_1_4_SRM_ID                    0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE           3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE              40
+
+/* Protocol message definitions for the HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0                      0x00
+#define HDCP_STREAM_TYPE1                      0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG                      1
+#define HDCP_2_2_AKE_INIT                      2
+#define HDCP_2_2_AKE_SEND_CERT                 3
+#define HDCP_2_2_AKE_NO_STORED_KM              4
+#define HDCP_2_2_AKE_STORED_KM                 5
+#define HDCP_2_2_AKE_SEND_HPRIME               7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO         8
+#define HDCP_2_2_LC_INIT                       9
+#define HDCP_2_2_LC_SEND_LPRIME                        10
+#define HDCP_2_2_SKE_SEND_EKS                  11
+#define HDCP_2_2_REP_SEND_RECVID_LIST          12
+#define HDCP_2_2_REP_SEND_ACK                  15
+#define HDCP_2_2_REP_STREAM_MANAGE             16
+#define HDCP_2_2_REP_STREAM_READY              17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE         50
+
+#define HDCP_2_2_RTX_LEN                       8
+#define HDCP_2_2_RRX_LEN                       8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN            128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN            3
+#define HDCP_2_2_K_PUB_RX_LEN                  (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+                                                HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN               384
+
+#define HDCP_2_2_E_KPUB_KM_LEN                 128
+#define HDCP_2_2_E_KH_KM_M_LEN                 (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN                   32
+#define HDCP_2_2_E_KH_KM_LEN                   16
+#define HDCP_2_2_RN_LEN                                8
+#define HDCP_2_2_L_PRIME_LEN                   32
+#define HDCP_2_2_E_DKEY_KS_LEN                 16
+#define HDCP_2_2_RIV_LEN                       8
+#define HDCP_2_2_SEQ_NUM_LEN                   3
+#define HDCP_2_2_V_PRIME_HALF_LEN              (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN               DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT              31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN          (HDCP_2_2_RECEIVER_ID_LEN * \
+                                                HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN                    32
+
+/* The following macros take a byte at a time for bit masking */
+/*
+ * TODO: This has to be changed for DP MST, as multiple streams on the
+ * same port are possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT       1
+#define HDCP_2_2_TXCAP_MASK_LEN                        2
+#define HDCP_2_2_RXCAPS_LEN                    3
+#define HDCP_2_2_RX_REPEATER(x)                        ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x)            ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN                    2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x)     ((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x)     ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x)       ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x)          ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x)               (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x)               ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x)                      (((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+       u8      receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+       u8      kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+       u8      reserved[2];
+       u8      dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+       u8      stream_id;
+       u8      stream_type;
+} __packed;
+
+/*
+ * The TxCaps field as specified in the HDCP HDMI and DP specs.
+ * This field is big endian, as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+       /* Transmitter must set this to 0x2 */
+       u8      version;
+
+       /* Reserved for HDCP and DP Spec. Read as Zero */
+       u8      tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+       u8                      msg_id;
+       u8                      r_tx[HDCP_2_2_RTX_LEN];
+       struct hdcp2_tx_caps    tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+       u8                      msg_id;
+       struct hdcp2_cert_rx    cert_rx;
+       u8                      r_rx[HDCP_2_2_RRX_LEN];
+       u8                      rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+       u8      msg_id;
+       u8      e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+       u8      msg_id;
+       u8      e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+       u8      msg_id;
+       u8      h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+       u8      msg_id;
+       u8      e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+       u8      msg_id;
+       u8      r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+       u8      msg_id;
+       u8      l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+       u8      msg_id;
+       u8      e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+       u8      riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+       u8      msg_id;
+       u8      rx_info[HDCP_2_2_RXINFO_LEN];
+       u8      seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+       u8      v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+       u8      receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+       u8      msg_id;
+       u8      v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+       u8                      msg_id;
+       u8                      seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+       __be16                  k;
+       struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+       u8      msg_id;
+       u8      m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+       u8      msg_id;
+       u8      stream_type;
+} __packed;
+
+/* HDCP2.2 timeouts in milliseconds */
+#define HDCP_2_2_CERT_TIMEOUT_MS               100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS   1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS      200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS            200
+#define        HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS         20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS          7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS                3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS       100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET           0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET                0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET      0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET                0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET           0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK             BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL           0x02
+#define HDCP_2_2_SEQ_NUM_MAX                   0xFFFFFF
+#define        HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN     200
+
+/* The macros below take a byte at a time and mask the relevant bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN             2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x)    ((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x)                ((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x)   ((x) & BIT(3))
+
 #endif
index d87b268f1781cb50fd8674d6349a0626760e125f..926379d53484ace11b5e05be242a3c53a296574a 100644 (file)
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
  * @hang_limit: once the number of hangs caused by a job crosses this limit,
  *              the job is marked guilty and will no longer be scheduled.
  * @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks whether the underlying HW is ready to work
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        num_jobs;
+       bool                            ready;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
                            struct drm_sched_job *job);
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
 void drm_sched_job_kickout(struct drm_sched_job *s_job);
 
 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
index fd965ffbb92e33fcef415033b1a78a09849bda05..192667144693a0ab3adb7ae12d04a420b7567b37 100644 (file)
        INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 /* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
        INTEL_VGA_DEVICE(0x591C, info),  /* ULX GT2 */ \
        INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
 
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+       INTEL_VGA_DEVICE(0x87CA, info)
+
 #define INTEL_KBL_IDS(info) \
        INTEL_KBL_GT1_IDS(info), \
        INTEL_KBL_GT2_IDS(info), \
        INTEL_KBL_GT3_IDS(info), \
        INTEL_KBL_GT4_IDS(info), \
-       INTEL_AML_GT2_IDS(info)
+       INTEL_AML_KBL_GT2_IDS(info)
 
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
 
 /* WHL/CFL U GT1 */
 #define INTEL_WHL_U_GT1_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA1, info)
+       INTEL_VGA_DEVICE(0x3EA1, info), \
+       INTEL_VGA_DEVICE(0x3EA4, info)
 
 /* WHL/CFL U GT2 */
 #define INTEL_WHL_U_GT2_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA0, info)
+       INTEL_VGA_DEVICE(0x3EA0, info), \
+       INTEL_VGA_DEVICE(0x3EA3, info)
 
 /* WHL/CFL U GT3 */
 #define INTEL_WHL_U_GT3_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA2, info), \
-       INTEL_VGA_DEVICE(0x3EA3, info), \
-       INTEL_VGA_DEVICE(0x3EA4, info)
+       INTEL_VGA_DEVICE(0x3EA2, info)
 
 #define INTEL_CFL_IDS(info)       \
        INTEL_CFL_S_GT1_IDS(info), \
        INTEL_CFL_U_GT3_IDS(info), \
        INTEL_WHL_U_GT1_IDS(info), \
        INTEL_WHL_U_GT2_IDS(info), \
-       INTEL_WHL_U_GT3_IDS(info)
+       INTEL_WHL_U_GT3_IDS(info), \
+       INTEL_AML_CFL_GT2_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
index e4fee8e02559be50a8646710a7370d3033135455..1021106438b2a581c9ba642caf68bac2411f7694 100644 (file)
@@ -31,7 +31,6 @@
 #define _TTM_BO_DRIVER_H_
 
 #include <drm/drm_mm.h>
-#include <drm/drm_global.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
@@ -384,15 +383,6 @@ struct ttm_bo_driver {
                             void *buf, int len, int write);
 };
 
-/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
-       struct drm_global_reference ref;
-       struct ttm_mem_global *mem_glob;
-};
-
 /**
  * struct ttm_bo_global - Buffer object driver global data.
  *
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
  * @swap_lru: Lru list of buffer objects used for swapping.
  */
 
-struct ttm_bo_global {
+extern struct ttm_bo_global {
 
        /**
         * Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
-       struct mutex device_list_mutex;
        spinlock_t lru_lock;
 
        /**
-        * Protected by device_list_mutex.
+        * Protected by ttm_global_mutex.
         */
+       unsigned int use_count;
        struct list_head device_list;
 
        /**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
         * Internal protection.
         */
        atomic_t bo_count;
-};
+} ttm_bo_glob;
 
 
 #define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
 void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);
 
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
 int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
 /**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * Returns:
  * !0: Failure.
  */
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
                       uint64_t file_page_offset, bool need_dma32);
index 737b5fed80031bac39aa1a59b65a3f2a6da42d11..3ff48a0a2d7b7728469034430434a311e4bbcb40 100644 (file)
@@ -63,7 +63,7 @@
 
 #define TTM_MEM_MAX_ZONES 2
 struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
        struct kobject kobj;
        struct ttm_bo_global *bo_glob;
        struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
 #else
        struct ttm_mem_zone *zone_dma32;
 #endif
-};
+} ttm_mem_glob;
 
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
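The 'extern struct ttm_mem_global { ... } ttm_mem_glob;' form used in this and the ttm_bo_driver.h hunk defines the type and declares a single shared instance in one go; exactly one translation unit (TTM's core, here) then provides the definition. A condensed sketch of the idiom with a hypothetical type:

/* header: type definition plus extern declaration */
extern struct gadget {
	int refcount;
} the_gadget;

/* exactly one .c file supplies the storage */
struct gadget the_gadget;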
index 6b92b3395fa9954ec140df3ed5b2b6d7b4f527f1..65a38c4a02a18d59ff5837447264f7f00998e4fb 100644 (file)
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
         CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
         CEPH_FEATURE_CEPHX_V2)
 
-#define CEPH_FEATURES_REQUIRED_DEFAULT   \
-       (CEPH_FEATURE_NOSRCADDR |        \
-        CEPH_FEATURE_SUBSCRIBE2 |       \
-        CEPH_FEATURE_RECONNECT_SEQ |    \
-        CEPH_FEATURE_PGID64 |           \
-        CEPH_FEATURE_PGPOOL3 |          \
-        CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
 
 #endif
index c0f5db3a962174953a0e8bf19873b7a22334448d..2010493e1040846c999804e2e157233c27ccef60 100644 (file)
 #define KASAN_ABI_VERSION 3
 #endif
 
-/*
- * Because __no_sanitize_address conflicts with inlining:
- *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other. 
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline                                        \
-       __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
index 18c80cfa4fc485efeb3f85aa3ddcaeadf49ad7d6..06396c1cf127f75bb357326883f1dcb69161ccf1 100644 (file)
@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
  *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  */
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
 #else
 # define __no_kasan_or_inline __always_inline
 #endif
index 6b28c1b7310c0176bddb490f9443eb15b101400e..f8c400ba1929ee503be1b09824e1606860fac0fc 100644 (file)
@@ -4,22 +4,26 @@
 
 /*
  * The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
  *
  * Any other "attributes" (i.e. those that depend on a configuration option,
  * on a compiler, on an architecture, on plugins, on other attributes...)
  * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
  *
  * This file is meant to be sorted (by actual attribute name,
  * not by #define identifier). Use the __attribute__((__name__)) syntax
  * (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
  */
 
 /*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
  *
  * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
  * depending on the compiler used to build it; however, these attributes have
index 3439d7d0249aa592bc0100a9f8058fb2b3a2c249..4a3f9c09c92d04583f9a0d4fa335a1fb9ce44ab8 100644 (file)
@@ -130,6 +130,10 @@ struct ftrace_likely_data {
 # define randomized_struct_fields_end
 #endif
 
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 
index 845174e113ce9b360e899553b7e97f837a5abff7..100ce4a4aff6ce1808a0993a7bc24ebeb3612468 100644 (file)
@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
 extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
 
 extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
 #else
 static inline bool efi_enabled(int feature)
 {
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
 {
        return false;
 }
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+       return 0;
+}
 #endif
 
 extern int efi_status_to_err(efi_status_t status);
index 2827b87590d8d55893c87a1593be582994f4a7e5..387c70df6f29cc215f4678d6d19292e1f0b7ff20 100644 (file)
@@ -722,8 +722,8 @@ struct hid_usage_id {
  * input will not be passed to raw_event unless hid_device_io_start is
  * called.
  *
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return a negative value on error; any other
+ * value passes the event on to .event(). Typically return 0 for success.
  *
  * input_mapping shall return a negative value to completely ignore this usage
  * (e.g. doubled or invalid usage), zero to continue with parsing of this
index e6bb36a97519b00f1aa5e438dc2f72501f93c682..8336b2f6f834627c462a0add39f4502aecfb5ed6 100644 (file)
@@ -21,6 +21,7 @@
 #define PIT_LATCH      ((PIT_TICK_RATE + HZ/2) / HZ)
 
 extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
 extern struct clock_event_device i8253_clockevent;
 extern void clockevent_i8253_init(bool oneshot);
 
index fcf9cc9d535faf54c6b0fa463b6cf09643d0e5c4..5411de93a363e8a14bb980a30c8e5af67f25907e 100644 (file)
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
 
 static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
+       if (mm_pud_folded(mm))
+               return;
        atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_puds(struct mm_struct *mm)
 {
+       if (mm_pud_folded(mm))
+               return;
        atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 #endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 
 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
+       if (mm_pmd_folded(mm))
+               return;
        atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
+       if (mm_pmd_folded(mm))
+               return;
        atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 #endif
index abe975c87b9003a7301f0e879f6bdad733e6c583..7f53ece2c039aeb849ca929b2b13cb29bd172292 100644 (file)
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
  */
 static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
 {
-       return (u64)nand->memorg.luns_per_target *
-              nand->memorg.eraseblocks_per_lun *
-              nand->memorg.pages_per_eraseblock;
+       return nand->memorg.ntargets * nand->memorg.luns_per_target *
+              nand->memorg.eraseblocks_per_lun;
 }
 
 /**
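The old expression both ignored memorg.ntargets and multiplied in pages_per_eraseblock, so it returned a per-target, page-flavoured count rather than the documented number of eraseblocks. With a hypothetical geometry of 2 targets, 2 LUNs per target, 1024 eraseblocks per LUN and 64 pages per eraseblock:

	old: 2 * 1024 * 64 = 131072	/* one target only, wrong units */
	new: 2 * 2 * 1024  =   4096	/* total eraseblocks, as documented */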
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
 }
 
 /**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
  * @nand: NAND device
  * @pos: the position to update
  *
index dc1d9ed33b3192e9406b17c3107b3235b28ff1b9..857f8abf7b91bc79731873fc8f68e31f6bff4d03 100644 (file)
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 #endif
 }
 
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+                                         unsigned int bytes,
+                                         bool xmit_more)
+{
+       if (xmit_more) {
+#ifdef CONFIG_BQL
+               dql_queued(&dev_queue->dql, bytes);
+#endif
+               return netif_tx_queue_stopped(dev_queue);
+       }
+       netdev_tx_sent_queue(dev_queue, bytes);
+       return true;
+}
+
 /**
  *     netdev_sent_queue - report the number of bytes queued to hardware
  *     @dev: network device
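A sketch of the intended driver usage; the foo_* names are hypothetical, and in this kernel generation the batching hint travels as skb->xmit_more. The driver posts the descriptor, lets the helper fold BQL accounting into the doorbell decision, and kicks the NIC only when the helper returns true:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	foo_post_descriptor(priv, skb);		/* hypothetical ring insert */

	if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
		foo_ring_doorbell(priv);	/* hypothetical MMIO doorbell */

	return NETDEV_TX_OK;
}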
index 34fc80f3eb900deb8e4c21b10edf8909c469e7b4..1d100efe74ec76861084a4272327b662cf1de478 100644 (file)
@@ -314,7 +314,7 @@ enum {
 extern ip_set_id_t ip_set_get_byname(struct net *net,
                                     const char *name, struct ip_set **set);
 extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
 extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
 extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
 
index 8e2bab1e8e90930f954ec7dc3a1b7a8179eecd13..70877f8de7e919d30716f0483610dfb12eeec433 100644 (file)
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
        rcu_assign_pointer(comment->c, c);
 }
 
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
 static inline int
 ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
 {
-       struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+       struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
 
        if (!c)
                return 0;
index 08f9247e9827e0056eb4d82c5c83a73a19cebd11..9003e29cde4615eb9a9e7785c675d36e1f24b8df 100644 (file)
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
 void watchdog_nmi_stop(void);
 void watchdog_nmi_start(void);
 int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
index d8a07a4f171dbfe21ee0e288d98658f304308735..a8f6d5d89524d3163a0c8f1dc835259f8d1dea51 100644 (file)
@@ -18,6 +18,8 @@ struct notifier_block;
 
 struct bio;
 
+struct pagevec;
+
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
 #endif
 
 extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
index 14b789a123e7d9240cea72fc01bf0d4d7acdba9b..1656c59784987bd486ace6be1f10705fb47ac5c6 100644 (file)
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr);
 bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
                             const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
 
 /* Device notifier */
 int register_inet6addr_notifier(struct notifier_block *nb);
index d7578cf49c3af85f2cd164a0b242d064b25ed23b..c9c78c15bce04eea71172ecad8693eb363bc2d60 100644 (file)
@@ -146,10 +146,12 @@ struct ifacaddr6 {
        struct in6_addr         aca_addr;
        struct fib6_info        *aca_rt;
        struct ifacaddr6        *aca_next;
+       struct hlist_node       aca_addr_lst;
        int                     aca_users;
        refcount_t              aca_refcnt;
        unsigned long           aca_cstamp;
        unsigned long           aca_tstamp;
+       struct rcu_head         rcu;
 };
 
 #define        IFA_HOST        IPV6_ADDR_LOOPBACK
index eed04af9b75e56b6c33d0887cdefa4c8f827251e..ae7b86f587f2c77c5e2e05972d67b070a39b8711 100644 (file)
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const char *fmt, ...) { }
 #endif /* CONFIG_SYSCTL */
 
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
 #endif /*_NF_CONNTRACK_PROTOCOL_H*/
index a9834c37ac40061d0e18209756c1d3be2c0497d7..c0e7d24ca25682acf384d56d7b0c87e71e6c456b 100644 (file)
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
-               strlcpy(__entry->type, type, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+               strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
                __entry->numerator      = numerator;
                __entry->denominator    = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
 
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),
 
        TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
index 370e9a5536ef5255bf91f497c08e86298f57655c..be84e43c1e19e916e8d7de8dc14b498a4dbc6a28 100644 (file)
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
 /* GFX9 and later: */
 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT               0
 #define AMDGPU_TILING_SWIZZLE_MODE_MASK                        0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT            5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK             0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT              29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK               0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT                43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK         0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
index a4446f452040aa2bdb15dfd8c28c320b073f9bf0..298b2e197744bbc28782d1a853e1ee3577f02bee 100644 (file)
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
        int irq_seq;
 } drm_i915_irq_wait_t;
 
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE    0
+#define I915_GEM_PPGTT_ALIASING        1
+#define I915_GEM_PPGTT_FULL    2
+
 /* Ioctl to query kernel params:
  */
 #define I915_PARAM_IRQ_ACTIVE            1
index f5ff8a76e208fc45584bb76503860c6fdf6650fd..b01eb502d49c55d04f33cace28a410171239eaf5 100644 (file)
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
 };
 
 struct kfd_ioctl_get_queue_wave_state_args {
-       uint64_t ctl_stack_address;     /* to KFD */
-       uint32_t ctl_stack_used_size;   /* from KFD */
-       uint32_t save_area_used_size;   /* from KFD */
-       uint32_t queue_id;              /* to KFD */
-       uint32_t pad;
+       __u64 ctl_stack_address;        /* to KFD */
+       __u32 ctl_stack_used_size;      /* from KFD */
+       __u32 save_area_used_size;      /* from KFD */
+       __u32 queue_id;                 /* to KFD */
+       __u32 pad;
 };
 
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
 
 /* hw exception data */
 struct kfd_hsa_hw_exception_data {
-       uint32_t reset_type;
-       uint32_t reset_cause;
-       uint32_t memory_lost;
-       uint32_t gpu_id;
+       __u32 reset_type;
+       __u32 reset_cause;
+       __u32 memory_lost;
+       __u32 gpu_id;
 };
 
 /* Event data */
index 579974b0bf0d8140882ff2a48f96184f0f829456..7de4f1bdaf06a28a7e64fb9d72fba3d42d0032b8 100644 (file)
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
        NFTA_NG_MODULUS,
        NFTA_NG_TYPE,
        NFTA_NG_OFFSET,
-       NFTA_NG_SET_NAME,
-       NFTA_NG_SET_ID,
+       NFTA_NG_SET_NAME,       /* deprecated */
+       NFTA_NG_SET_ID,         /* deprecated */
        __NFTA_NG_MAX
 };
 #define NFTA_NG_MAX    (__NFTA_NG_MAX - 1)
index 156ccd089df184853c180a240bfaaaa27774a4fe..1610fdbab98dfc89212ee653a573da8c39bdefe8 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/if_pppox.h>
 
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
 /* Bridge Hooks */
 /* After promisc drops, checksum checks. */
 #define NF_BR_PRE_ROUTING      0
index 34dd3d497f2cc52b6742d5bf89fa1e88aa947d57..c81feb373d3ea597a7d2c66ad203ad18ed821189 100644 (file)
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
 
 #define SCTP_ASSOC_CHANGE_DENIED       0x0004
 #define SCTP_ASSOC_CHANGE_FAILED       0x0008
+#define SCTP_STREAM_CHANGE_DENIED      SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED      SCTP_ASSOC_CHANGE_FAILED
 struct sctp_stream_change_event {
        __u16 strchange_type;
        __u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
 /* SCTP Stream schedulers */
 enum sctp_sched_type {
        SCTP_SS_FCFS,
+       SCTP_SS_DEFAULT = SCTP_SS_FCFS,
        SCTP_SS_PRIO,
        SCTP_SS_RR,
        SCTP_SS_MAX = SCTP_SS_RR
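
With the new SCTP_SS_DEFAULT alias, applications can request the default scheduler without hard-coding FCFS. A rough userspace sketch, assuming the SCTP_STREAM_SCHEDULER socket option with a struct sctp_assoc_value payload and a connected SCTP socket sd:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

static int use_default_scheduler(int sd)
{
	struct sctp_assoc_value av = {
		.assoc_id = 0,			/* current association */
		.assoc_value = SCTP_SS_DEFAULT,	/* == SCTP_SS_FCFS */
	};

	return setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
			  &av, sizeof(av));
}
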
index 18803ff76e27808bc8263284bbd089c32877ba05..4969817124a8d7c6b462aeb18f54105d1d49e2e3 100644 (file)
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                unsigned int address_bits,
                                dma_addr_t *dma_handle);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
-                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages);
 #else
 static inline int xen_create_contiguous_region(phys_addr_t pstart,
                                               unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
 
 static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
                                                 unsigned int order) { }
+#endif
 
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+                 unsigned int domid, bool no_translate, struct page **pages);
+#else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot,  unsigned int domid,
index 6377225b208204c1c2d8829a778f50ebaa7d816d..1a796e0799ec4a524aee5c325734f0ab22a410ac 100644 (file)
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
 {
-       unsigned long symbol_start, symbol_end;
        struct bpf_prog_aux *aux;
        unsigned int it = 0;
        int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                if (it++ != symnum)
                        continue;
 
-               bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
                bpf_get_prog_name(aux->prog, sym);
 
-               *value = symbol_start;
+               *value = (unsigned long)aux->prog->bpf_func;
                *type  = BPF_SYM_ELF_TYPE;
 
                ret = 0;
index ccb93277aae2c607e7b6ef079e5432d89ef4a1f6..cf5040fd54344dd798f73464eadb5b9684300f1c 100644 (file)
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                info.jited_prog_len = 0;
                info.xlated_prog_len = 0;
                info.nr_jited_ksyms = 0;
+               info.nr_jited_func_lens = 0;
                goto done;
        }
 
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        ulen = info.nr_jited_ksyms;
-       info.nr_jited_ksyms = prog->aux->func_cnt;
+       info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
        if (info.nr_jited_ksyms && ulen) {
                if (bpf_dump_raw_ok()) {
+                       unsigned long ksym_addr;
                        u64 __user *user_ksyms;
-                       ulong ksym_addr;
                        u32 i;
 
                        /* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                         */
                        ulen = min_t(u32, info.nr_jited_ksyms, ulen);
                        user_ksyms = u64_to_user_ptr(info.jited_ksyms);
-                       for (i = 0; i < ulen; i++) {
-                               ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
-                               ksym_addr &= PAGE_MASK;
-                               if (put_user((u64) ksym_addr, &user_ksyms[i]))
+                       if (prog->aux->func_cnt) {
+                               for (i = 0; i < ulen; i++) {
+                                       ksym_addr = (unsigned long)
+                                               prog->aux->func[i]->bpf_func;
+                                       if (put_user((u64) ksym_addr,
+                                                    &user_ksyms[i]))
+                                               return -EFAULT;
+                               }
+                       } else {
+                               ksym_addr = (unsigned long) prog->bpf_func;
+                               if (put_user((u64) ksym_addr, &user_ksyms[0]))
                                        return -EFAULT;
                        }
                } else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        ulen = info.nr_jited_func_lens;
-       info.nr_jited_func_lens = prog->aux->func_cnt;
+       info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
        if (info.nr_jited_func_lens && ulen) {
                if (bpf_dump_raw_ok()) {
                        u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                        /* copy the JITed image lengths for each function */
                        ulen = min_t(u32, info.nr_jited_func_lens, ulen);
                        user_lens = u64_to_user_ptr(info.jited_func_lens);
-                       for (i = 0; i < ulen; i++) {
-                               func_len = prog->aux->func[i]->jited_len;
-                               if (put_user(func_len, &user_lens[i]))
+                       if (prog->aux->func_cnt) {
+                               for (i = 0; i < ulen; i++) {
+                                       func_len =
+                                               prog->aux->func[i]->jited_len;
+                                       if (put_user(func_len, &user_lens[i]))
+                                               return -EFAULT;
+                               }
+                       } else {
+                               func_len = prog->jited_len;
+                               if (put_user(func_len, &user_lens[0]))
                                        return -EFAULT;
                        }
                } else {
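
The `func_cnt ? : 1` form above is GCC's conditional with an omitted middle operand: it evaluates func_cnt once and yields it when non-zero, else 1, so a program that was never split into subprograms still reports exactly one ksym and one length entry. Spelled out:

static u32 nr_entries(u32 func_cnt)
{
	return func_cnt ? : 1;	/* equivalent to: func_cnt ? func_cnt : 1 */
}
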
index 6ad4a9fcbd6f7012ca577070455bec9a1cd03e57..7921ae4fca8de92513fb5b71cbc36fd96ed0e04f 100644 (file)
@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
                                kdb_printf("no process for cpu %ld\n", cpu);
                                return 0;
                        }
-                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
                        kdb_parse(buf);
                        return 0;
                }
                kdb_printf("btc: cpu status: ");
                kdb_parse("cpu\n");
                for_each_online_cpu(cpu) {
-                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
                        kdb_parse(buf);
                        touch_nmi_watchdog();
                }
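
Background for the %p to %px sweep in this and the following kdb hunks: since v4.15, plain %p prints a per-boot hashed value so logs do not leak kernel addresses, while %px deliberately prints the raw pointer. kdb is a privileged debugger whose output is only useful with real addresses. Sketch:

static void show_pointer(const void *p)
{
	pr_info("hashed: %p\n", p);	/* safe default since v4.15 */
	pr_info("raw:    %px\n", p);	/* actual address; use sparingly */
}
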
index ed5d34925ad0617a40aeed3774b0e393aec03e99..6a4b41484afe654572f3b4f37186046c4541c8e5 100644 (file)
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
        int count;
        int i;
        int diag, dtab_count;
-       int key;
+       int key, buf_size, ret;
 
 
        diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
                else
                        p_tmp = tmpbuffer;
                len = strlen(p_tmp);
-               count = kallsyms_symbol_complete(p_tmp,
-                                                sizeof(tmpbuffer) -
-                                                (p_tmp - tmpbuffer));
+               buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
+               count = kallsyms_symbol_complete(p_tmp, buf_size);
                if (tab == 2 && count > 0) {
                        kdb_printf("\n%d symbols are found.", count);
                        if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+                               ret = kallsyms_symbol_next(p_tmp, i, buf_size);
+                               if (WARN_ON(!ret))
                                        break;
-                               kdb_printf("%s ", p_tmp);
+                               if (ret != -E2BIG)
+                                       kdb_printf("%s ", p_tmp);
+                               else
+                                       kdb_printf("%s... ", p_tmp);
                                *(p_tmp + len) = '\0';
                        }
                        if (i >= dtab_count)
index 118527aa60eae183f6d3b882ff2102a0c9a6012a..750497b0003a6decd80a97051aec97347079fd93 100644 (file)
@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
        case KT_LATIN:
                if (isprint(keychar))
                        break;          /* printable characters */
-               /* drop through */
+               /* fall through */
        case KT_SPEC:
                if (keychar == K_ENTER)
                        break;
-               /* drop through */
+               /* fall through */
        default:
                return -1;      /* ignore unprintables */
        }
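
The comment rewording matters because -Wimplicit-fallthrough (GCC 7+) only recognizes particular spellings such as "fall through"; "drop through" would still warn. A minimal sketch of the recognized shape:

static int count_handled(int key)
{
	int handled = 0;

	switch (key) {
	case '\n':
		handled++;
		/* fall through */
	case '\r':
		handled++;
		break;
	default:
		break;
	}
	return handled;
}
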
index bb4fe4e1a601b5252197f20babd6da394aaa0bc9..d72b32c66f7dd3ba3f5cf254b4d8a66da21d259d 100644 (file)
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
        if (reason == KDB_REASON_DEBUG) {
                /* special case below */
        } else {
-               kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+               kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
                           kdb_current, kdb_current ? kdb_current->pid : 0);
 #if defined(CONFIG_SMP)
                kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 */
                switch (db_result) {
                case KDB_DB_BPT:
-                       kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+                       kdb_printf("\nEntering kdb (0x%px, pid %d) ",
                                   kdb_current, kdb_current->pid);
 #if defined(CONFIG_SMP)
                        kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
        char cbuf[32];
        char *c = cbuf;
        int i;
+       int j;
        unsigned long word;
 
        memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
                        wc.word = word;
 #define printable_char(c) \
        ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
-                       switch (bytesperword) {
-                       case 8:
+                       for (j = 0; j < bytesperword; j++)
                                *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               addr += 4;
-                       case 4:
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               addr += 2;
-                       case 2:
-                               *c++ = printable_char(*cp++);
-                               addr++;
-                       case 1:
-                               *c++ = printable_char(*cp++);
-                               addr++;
-                               break;
-                       }
+                       addr += bytesperword;
 #undef printable_char
                }
        }
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
 
-               kdb_printf("%-20s%8u  0x%p ", mod->name,
+               kdb_printf("%-20s%8u  0x%px ", mod->name,
                           mod->core_layout.size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
                kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
                        kdb_printf(" (Loading)");
                else
                        kdb_printf(" (Live)");
-               kdb_printf(" 0x%p", mod->core_layout.base);
+               kdb_printf(" 0x%px", mod->core_layout.base);
 
 #ifdef CONFIG_MODULE_UNLOAD
                {
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
                return;
 
        cpu = kdb_process_cpu(p);
-       kdb_printf("0x%p %8d %8d  %d %4d   %c  0x%p %c%s\n",
+       kdb_printf("0x%px %8d %8d  %d %4d   %c  0x%px %c%s\n",
                   (void *)p, p->pid, p->parent->pid,
                   kdb_task_has_cpu(p), kdb_process_cpu(p),
                   kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
                } else {
                        if (KDB_TSK(cpu) != p)
                                kdb_printf("  Error: does not match running "
-                                  "process table (0x%p)\n", KDB_TSK(cpu));
+                                  "process table (0x%px)\n", KDB_TSK(cpu));
                }
        }
 }
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
        for_each_kdbcmd(kp, i) {
                if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
                        kdb_printf("Duplicate kdb command registered: "
-                               "%s, func %p help %s\n", cmd, func, help);
+                               "%s, func %px help %s\n", cmd, func, help);
                        return 1;
                }
        }
index 1e5a502ba4a7b44787a097540cc21787cce41f5d..2118d8258b7c9a3d66b917f10575f7b109b5d665 100644 (file)
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
                unsigned long sym_start;
                unsigned long sym_end;
                } kdb_symtab_t;
-extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
 extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
 
 /* Exported Symbols for kernel loadable modules to use. */
index 990b3cc526c80d2162d79f0524dbd83932418cdf..50bf9b119bad04952c767451a4e29315b0234ab0 100644 (file)
@@ -40,7 +40,7 @@
 int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
 {
        if (KDB_DEBUG(AR))
-               kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+               kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
                           symtab);
        memset(symtab, 0, sizeof(*symtab));
        symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
        char *knt1 = NULL;
 
        if (KDB_DEBUG(AR))
-               kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+               kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
        memset(symtab, 0, sizeof(*symtab));
 
        if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
                symtab->mod_name = "kernel";
        if (KDB_DEBUG(AR))
                kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
-                  "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+                  "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
                   symtab->sym_start, symtab->mod_name, symtab->sym_name,
                   symtab->sym_name);
 
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
  * Parameters:
  *     prefix_name     prefix of a symbol name to lookup
  *     flag    0 means search from the head, 1 means continue search.
+ *     buf_size        maximum length that can be written to prefix_name
+ *                     buffer
  * Returns:
  *     1 if a symbol matches the given prefix.
  *     0 if no string found
  */
-int kallsyms_symbol_next(char *prefix_name, int flag)
+int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
 {
        int prefix_len = strlen(prefix_name);
        static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
                pos = 0;
 
        while ((name = kdb_walk_kallsyms(&pos))) {
-               if (strncmp(name, prefix_name, prefix_len) == 0) {
-                       strncpy(prefix_name, name, strlen(name)+1);
-                       return 1;
-               }
+               if (!strncmp(name, prefix_name, prefix_len))
+                       return strscpy(prefix_name, name, buf_size);
        }
        return 0;
 }
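
Unlike strncpy(), strscpy() always NUL-terminates and reports what happened: the number of characters copied, or -E2BIG when the source had to be truncated to fit. That return value is what lets the tab-completion caller above print "..." for truncated symbols. A small sketch:

static void strscpy_demo(void)
{
	char buf[8];
	ssize_t ret = strscpy(buf, "a_rather_long_symbol", sizeof(buf));

	if (ret == -E2BIG)
		pr_info("%s...\n", buf);	/* prints "a_rathe..." */
	else
		pr_info("%s (%zd chars)\n", buf, ret);
}
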
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
                                *word = w8;
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
                                *word = w8;
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
                        diag = kdb_putarea(addr, w8);
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
                   __func__, dah_first);
        if (dah_first) {
                h_used = (struct debug_alloc_header *)debug_alloc_pool;
-               kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+               kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
                           h_used->size);
        }
        do {
                h_used = (struct debug_alloc_header *)
                          ((char *)h_free + dah_overhead + h_free->size);
-               kdb_printf("%s: h_used %p size %d caller %p\n",
+               kdb_printf("%s: h_used %px size %d caller %px\n",
                           __func__, h_used, h_used->size, h_used->caller);
                h_free = (struct debug_alloc_header *)
                          (debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
                  ((char *)h_free + dah_overhead + h_free->size);
        if ((char *)h_used - debug_alloc_pool !=
            sizeof(debug_alloc_pool_aligned))
-               kdb_printf("%s: h_used %p size %d caller %p\n",
+               kdb_printf("%s: h_used %px size %d caller %px\n",
                           __func__, h_used, h_used->size, h_used->caller);
 out:
        spin_unlock(&dap_lock);
index b3a3a1fc499eaf386b3b3e1c51f2100b7226ff37..b0fbf685c77a52ba45dc4c3e2044782080a74953 100644 (file)
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
 EXPORT_SYMBOL(release_resource);
 
 /**
- * Finds the lowest iomem resource that covers part of [start..end].  The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end].  The
+ * caller must specify @start, @end, @flags, and @desc (which may be
  * IORES_DESC_NONE).
  *
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]. If none is found, returns
+ * -1; if the parameters are invalid, returns -EINVAL.
  *
  * This function walks the whole tree and not just first level children
  * unless @first_lvl is true.
+ *
+ * @start:     start address of the resource searched for
+ * @end:       end address of same resource
+ * @flags:     flags which the resource must have
+ * @desc:      descriptor the resource must have
+ * @first_lvl: walk only the first level children, if set
+ * @res:       return ptr, if resource found
  */
 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
                               unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
  * @flags: I/O resource flags
  * @start: start addr
  * @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
  *
  * NOTE: For a new descriptor search, define a new IORES_DESC in
  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
index f12225f26b70a630ac185cfccba2b140f191c47e..091e089063be1dc25ab1180fec3f99a4408fb024 100644 (file)
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
         * CPU masks are stable and all blatant races in the below code cannot
-        * happen.
+        * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+        * but there won't be any contention on it.
         */
+       cpus_read_lock();
        mutex_lock(&sched_domains_mutex);
        sched_init_domains(cpu_active_mask);
        mutex_unlock(&sched_domains_mutex);
+       cpus_read_unlock();
 
        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
index ee271bb661cc923dfa67ae5d5c45a18c71df7cb1..ac855b2f47746efa80ed91081442626d15169568 100644 (file)
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
                local = 1;
 
        /*
-        * Retry task to preferred node migration periodically, in case it
-        * case it previously failed, or the scheduler moved us.
+        * Retry to migrate task to preferred node periodically, in case it
+        * previously failed, or the scheduler moved us.
         */
        if (time_after(jiffies, p->numa_migrate_retry)) {
                task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
        return target;
 }
 
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
+static unsigned long cpu_util_without(int cpu, struct task_struct *p);
 
-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
 {
-       return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
+       return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
 }
 
 /*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                        avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
 
-                       spare_cap = capacity_spare_wake(i, p);
+                       spare_cap = capacity_spare_without(i, p);
 
                        if (spare_cap > max_spare_cap)
                                max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
                return prev_cpu;
 
        /*
-        * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
-        * last_update_time.
+        * We need task's util for capacity_spare_without, sync it up to
+        * prev_cpu's last_update_time.
         */
        if (!(sd_flag & SD_BALANCE_FORK))
                sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
 }
 
 /*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
+ * cpu_util_without: compute cpu utilization without any contributions from *p
+ * @cpu: the CPU whose utilization is requested
+ * @p: the task whose utilization should be discounted
+ *
+ * The utilization of a CPU is defined by the utilization of tasks currently
+ * enqueued on that CPU as well as tasks which are currently sleeping after an
+ * execution on that CPU.
+ *
+ * This method returns the utilization of the specified CPU by discounting the
+ * utilization of the specified task, whenever the task is currently
+ * contributing to the CPU utilization.
  */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 {
        struct cfs_rq *cfs_rq;
        unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
        cfs_rq = &cpu_rq(cpu)->cfs;
        util = READ_ONCE(cfs_rq->avg.util_avg);
 
-       /* Discount task's blocked util from CPU's util */
+       /* Discount task's util from CPU's util */
        util -= min_t(unsigned int, util, task_util(p));
 
        /*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
         * a) if *p is the only task sleeping on this CPU, then:
         *      cpu_util (== task_util) > util_est (== 0)
         *    and thus we return:
-        *      cpu_util_wake = (cpu_util - task_util) = 0
+        *      cpu_util_without = (cpu_util - task_util) = 0
         *
         * b) if other tasks are SLEEPING on this CPU, which is now exiting
         *    IDLE, then:
         *      cpu_util >= task_util
         *      cpu_util > util_est (== 0)
         *    and thus we discount *p's blocked utilization to return:
-        *      cpu_util_wake = (cpu_util - task_util) >= 0
+        *      cpu_util_without = (cpu_util - task_util) >= 0
         *
         * c) if other tasks are RUNNABLE on that CPU and
         *      util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
         * covered by the following code when estimated utilization is
         * enabled.
         */
-       if (sched_feat(UTIL_EST))
-               util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+       if (sched_feat(UTIL_EST)) {
+               unsigned int estimated =
+                       READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+               /*
+                * Despite the following checks we still have a small window
+                * for a possible race, when an execl's select_task_rq_fair()
+                * races with LB's detach_task():
+                *
+                *   detach_task()
+                *     p->on_rq = TASK_ON_RQ_MIGRATING;
+                *     ---------------------------------- A
+                *     deactivate_task()                   \
+                *       dequeue_task()                     + RaceTime
+                *         util_est_dequeue()              /
+                *     ---------------------------------- B
+                *
+                * The additional check on "current == p" is required to
+                * properly fix the execl regression and to further reduce
+                * the chances of the above race.
+                */
+               if (unlikely(task_on_rq_queued(p) || current == p)) {
+                       estimated -= min_t(unsigned int, estimated,
+                                          (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+               }
+               util = max(util, estimated);
+       }
 
        /*
         * Utilization (estimated) can exceed the CPU capacity, thus let's
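
A worked example of the UTIL_EST branch may help; the numbers are made up. Say util_avg = 600, task_util(p) = 200, util_est.enqueued = 700 and _task_util_est(p) = 300 while p is still enqueued:

static unsigned int sketch_cpu_util_without(void)
{
	unsigned int util = 600 - 200;	/* running/blocked util minus p */
	unsigned int estimated = 700;	/* cfs_rq->avg.util_est.enqueued */
	bool p_counted = true;		/* task_on_rq_queued(p) || current == p */

	if (p_counted)
		estimated -= 300;	/* discount p's estimate as well */
	return util > estimated ? util : estimated;	/* max() -> 400 */
}
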
index 7cdecfc010af83f1f5d8679536433f288aa847d7..3d7355d7c3e3852a085a2d68de6e7bbc705c5071 100644 (file)
@@ -633,38 +633,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
  */
 void cgroup_move_task(struct task_struct *task, struct css_set *to)
 {
-       bool move_psi = !psi_disabled;
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;
 
-       if (move_psi) {
-               rq = task_rq_lock(task, &rf);
+       if (psi_disabled) {
+               /*
+                * Lame to do this here, but the scheduler cannot be locked
+                * from the outside, so we move cgroups from inside sched/.
+                */
+               rcu_assign_pointer(task->cgroups, to);
+               return;
+       }
 
-               if (task_on_rq_queued(task))
-                       task_flags = TSK_RUNNING;
-               else if (task->in_iowait)
-                       task_flags = TSK_IOWAIT;
+       rq = task_rq_lock(task, &rf);
 
-               if (task->flags & PF_MEMSTALL)
-                       task_flags |= TSK_MEMSTALL;
+       if (task_on_rq_queued(task))
+               task_flags = TSK_RUNNING;
+       else if (task->in_iowait)
+               task_flags = TSK_IOWAIT;
 
-               if (task_flags)
-                       psi_task_change(task, task_flags, 0);
-       }
+       if (task->flags & PF_MEMSTALL)
+               task_flags |= TSK_MEMSTALL;
 
-       /*
-        * Lame to do this here, but the scheduler cannot be locked
-        * from the outside, so we move cgroups from inside sched/.
-        */
+       if (task_flags)
+               psi_task_change(task, task_flags, 0);
+
+       /* See comment above */
        rcu_assign_pointer(task->cgroups, to);
 
-       if (move_psi) {
-               if (task_flags)
-                       psi_task_change(task, 0, task_flags);
+       if (task_flags)
+               psi_task_change(task, 0, task_flags);
 
-               task_rq_unlock(rq, task, &rf);
-       }
+       task_rq_unlock(rq, task, &rf);
 }
 #endif /* CONFIG_CGROUPS */
 
index ce32cf741b250939de562ab05bd598fb30f10986..8f0644af40be7e5869f8f664775183bdb07a0f56 100644 (file)
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
        struct task_cputime cputime;
        unsigned long soft;
 
-       if (dl_task(tsk))
-               check_dl_overrun(tsk);
-
        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
index 3ef15a6683c002bc2c5402b5be8ad07c903021bc..bd30e9398d2a8b2afbc49839c969efd277ae2728 100644 (file)
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
                        if (code[1].op != FETCH_OP_IMM)
                                return -EINVAL;
 
-                       tmp = strpbrk("+-", code->data);
+                       tmp = strpbrk(code->data, "+-");
                        if (tmp)
                                c = *tmp;
                        ret = traceprobe_split_symbol_offset(code->data,
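
The one-liner above fixes swapped arguments: strpbrk(s, accept) returns a pointer to the first byte of s that occurs in accept, so the old call scanned the literal "+-" and never found an offset. Userspace illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *sym = "my_symbol+0x10";
	char *off = strpbrk(sym, "+-");	/* points at "+0x10" */

	if (off)
		printf("offset part: %s\n", off);
	return 0;
}
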
index e5222b5fb4fe6c3868c78ee3602518ce835c85e3..923414a246e9e4eb4bd422e8146133cad50db45f 100644 (file)
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;
 
-       ret = sort_idmaps(&new_map);
-       if (ret < 0)
-               goto out;
-
        ret = -EPERM;
        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
                e->lower_first = lower_first;
        }
 
+       /*
+        * If we want to use binary search for lookup, this clones the extent
+        * array and sorts both copies.
+        */
+       ret = sort_idmaps(&new_map);
+       if (ret < 0)
+               goto out;
+
        /* Install the map */
        if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
                memcpy(map->extent, new_map.extent,
index 5d73f5cb4d8a78f0887cc6cfbf10a29ec5d5f51c..79777645cac9c1243518f4f4bf403cdc567aa9ea 100644 (file)
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
         CFLAGS += -I../../../arch/arm/include -mfpu=neon
         HAS_NEON = yes
 endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
         CFLAGS += -I../../../arch/arm64/include
         HAS_NEON = yes
 endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
                    gcc -c -x assembler - >&/dev/null &&        \
                    rm ./-.o && echo -DCONFIG_AS_AVX512=1)
 else ifeq ($(HAS_NEON),yes)
-        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
         CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
index 59fee96c29a0f1fb83fe67282482bc5d941bd6f3..e4162f59a81ccacda275cd218193fb2ad34d71d3 100644 (file)
@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
 
 
-void __noreturn
-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 {
        unsigned long flags;
 
index f76e77a2d34b79afec5f3032366a6bd954d1aead..aa43620a3270ec08040f75e587d4cbe7617754ff 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -385,11 +385,17 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  * @vma: vm_area_struct mapping @address
  * @address: virtual address to look up
  * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
+ * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
+ *       pointer to output page_mask
  *
  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
  *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
+ * the device's dev_pagemap metadata to avoid repeating expensive lookups.
+ *
+ * On output, the @ctx->page_mask is set according to the size of the page.
+ *
+ * Return: the mapped (struct page *), %NULL if no mapping exists, or
  * an error pointer if there is a mapping to something not represented
  * by a page descriptor (see also vm_normal_page()).
  */
index c007fb5fb8d5f6547dc916e5ab468753028a862b..7f2a28ab46d537ff4ed5034cce0ea46b95991477 100644 (file)
@@ -3233,7 +3233,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
 {
-       pte_t *src_pte, *dst_pte, entry;
+       pte_t *src_pte, *dst_pte, entry, dst_entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;
@@ -3261,15 +3261,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        break;
                }
 
-               /* If the pagetables are shared don't copy or take references */
-               if (dst_pte == src_pte)
+               /*
+                * If the pagetables are shared don't copy or take references.
+                * dst_pte == src_pte is the common case of src/dest sharing.
+                *
+                * However, src could have 'unshared' and dst shares with
+                * another vma.  If dst_pte !none, this implies sharing.
+                * Check here before taking page table lock, and once again
+                * after taking the lock below.
+                */
+               dst_entry = huge_ptep_get(dst_pte);
+               if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
                        continue;
 
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                entry = huge_ptep_get(src_pte);
-               if (huge_pte_none(entry)) { /* skip none entry */
+               dst_entry = huge_ptep_get(dst_pte);
+               if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+                       /*
+                        * Skip if src entry none.  Also, skip in the
+                        * unlikely case dst entry !none as this implies
+                        * sharing with another vma.
+                        */
                        ;
                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
                                    is_hugetlb_entry_hwpoisoned(entry))) {
index 7df468c8ebc8c0ada5b560ed70c9c821e5127c05..9a2d5ae81ae1cf4217ed3174d72667be276769da 100644 (file)
@@ -1179,7 +1179,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * Common iterator interface used to define for_each_mem_range().
+ * Common iterator interface used to define for_each_mem_pfn_range().
  */
 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
index a919ba5cb3c845e03e4a070eff354acf19ec7c4a..6847177dc4a1a89ce098c6ab6f4b19e46dab2428 100644 (file)
@@ -4060,17 +4060,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned int cpuset_mems_cookie;
        int reserve_flags;
 
-       /*
-        * In the slowpath, we sanity check order to avoid ever trying to
-        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-        * be using allocators in order of preference for an area that is
-        * too large.
-        */
-       if (order >= MAX_ORDER) {
-               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-               return NULL;
-       }
-
        /*
         * We also sanity check to catch abuse of atomic reserves being used by
         * callers that are not in atomic context.
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
        gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = { };
 
+       /*
+        * There are several places where we assume that the order value is sane
+        * so bail out early if the request is out of bound.
+        */
+       if (unlikely(order >= MAX_ORDER)) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+               return NULL;
+       }
+
        gfp_mask &= gfp_allowed_mask;
        alloc_mask = gfp_mask;
        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
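
Hoisting the order check into __alloc_pages_nodemask() makes the sanity check cover the fast path too; callers that probe large orders on purpose are expected to pass __GFP_NOWARN, which the WARN_ON_ONCE enforces. A sketch of such an opportunistic caller:

static struct page *grab_chunk(void)
{
	/* try the largest valid order first, quietly */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN,
					MAX_ORDER - 1);

	if (!page)
		page = alloc_pages(GFP_KERNEL, 2);	/* modest fallback */
	return page;
}
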
@@ -7788,6 +7786,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                if (PageReserved(page))
                        goto unmovable;
 
+               /*
+                * If the zone is movable and we have ruled out all reserved
+                * pages then it should be reasonably safe to assume the rest
+                * is movable.
+                */
+               if (zone_idx(zone) == ZONE_MOVABLE)
+                       continue;
+
                /*
                 * Hugepages are not in LRU lists, but they're movable.
                 * We need not scan over tail pages because we don't
index ea26d7a0342d77ac67f47e813a73f125c873a1e5..0e10b06fc7d62eacf4263918850c2a8ade99cd2a 100644 (file)
@@ -756,7 +756,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
-               check_move_unevictable_pages(pvec.pages, pvec.nr);
+               check_move_unevictable_pages(&pvec);
                pagevec_release(&pvec);
                cond_resched();
        }
@@ -2563,9 +2563,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
        inode_lock(inode);
        /* We're holding i_mutex so we can access i_size directly */
 
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
+       if (offset < 0 || offset >= inode->i_size)
                offset = -ENXIO;
        else {
                start = offset >> PAGE_SHIFT;
index 644f746e167acd65e8244088f483e50c71afbf20..8688ae65ef58ac639b0b2202039fa22577309350 100644 (file)
@@ -2813,7 +2813,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        unsigned int type;
        int i;
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       p = kvzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);
 
@@ -2824,7 +2824,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        }
        if (type >= MAX_SWAPFILES) {
                spin_unlock(&swap_lock);
-               kfree(p);
+               kvfree(p);
                return ERR_PTR(-EPERM);
        }
        if (type >= nr_swapfiles) {
@@ -2838,7 +2838,7 @@ static struct swap_info_struct *alloc_swap_info(void)
                smp_wmb();
                nr_swapfiles++;
        } else {
-               kfree(p);
+               kvfree(p);
                p = swap_info[type];
                /*
                 * Do not memset this entry: a racing procfs swap_next()
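
kvzalloc() attempts a physically contiguous kmalloc() first and falls back to vzalloc() under memory pressure; kvfree() releases either kind, which is why every error path here can switch from kfree() to kvfree() mechanically. Sketch (the struct is hypothetical):

struct big_blob {
	unsigned long counters[4096];	/* ~32 KiB on 64-bit */
};

static struct big_blob *blob_alloc(void)
{
	return kvzalloc(sizeof(struct big_blob), GFP_KERNEL);
}

static void blob_free(struct big_blob *b)
{
	kvfree(b);	/* correct for either backing allocator */
}
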
index 62ac0c488624fd8fd3d2306b04951466cca1a0df..24ab1f7394abaafa9e0dccac37597ae65ef77e0f 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
        return ret;
 }
 
-#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages:     array of pages to check
- * @nr_pages:  number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
  *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability; if an evictable page is on the unevictable
+ * lru list, it is moved to the appropriate evictable lru list. This function
+ * should only be used for lru pages.
  */
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
 {
        struct lruvec *lruvec;
        struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
        int pgrescued = 0;
        int i;
 
-       for (i = 0; i < nr_pages; i++) {
-               struct page *page = pages[i];
+       for (i = 0; i < pvec->nr; i++) {
+               struct page *page = pvec->pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);
 
                pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
                spin_unlock_irq(&pgdat->lru_lock);
        }
 }
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
index 6038ce593ce3e1ca4cce34a02bc0bbd4c1d3a296..9c624595e90416bc9114fc11721e14d56183a1b6 100644 (file)
@@ -1827,12 +1827,13 @@ static bool need_update(int cpu)
 
                /*
                 * The fast way of checking if there are any vmstat diffs.
-                * This works because the diffs are byte sized items.
                 */
-               if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
+               if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
+                              sizeof(p->vm_stat_diff[0])))
                        return true;
 #ifdef CONFIG_NUMA
-               if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS))
+               if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
+                              sizeof(p->vm_numa_stat_diff[0])))
                        return true;
 #endif
        }
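
memchr_inv(p, c, n) scans n bytes for one that differs from c, so the length must be a byte count. The removed comment assumed byte-sized counters, which stopped being true once the NUMA diffs became u16; multiplying by the element size keeps the whole array covered. Sketch:

static bool any_nonzero_u16(const u16 *a, size_t n_items)
{
	/* the length argument is in bytes, not elements */
	return memchr_inv(a, 0, n_items * sizeof(a[0])) != NULL;
}
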
index 4b366d181f35d12f9a1e600bb7083bbf4dfe7fff..aee9b0b8d9078a0bbf59a06509c6f7f7aa1360f3 100644 (file)
@@ -99,6 +99,7 @@ struct z3fold_header {
 #define NCHUNKS                ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
 
 #define BUDDY_MASK     (0x3)
+#define BUDDY_SHIFT    2
 
 /**
  * struct z3fold_pool - stores metadata for each z3fold pool
@@ -145,7 +146,7 @@ enum z3fold_page_flags {
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
-       UNDER_RECLAIM
+       PAGE_CLAIMED, /* by either reclaim or free */
 };
 
 /*****************
@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
-       clear_bit(UNDER_RECLAIM, &page->private);
+       clear_bit(PAGE_CLAIMED, &page->private);
 
        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
        unsigned long handle;
 
        handle = (unsigned long)zhdr;
-       if (bud != HEADLESS)
-               handle += (bud + zhdr->first_num) & BUDDY_MASK;
+       if (bud != HEADLESS) {
+               handle |= (bud + zhdr->first_num) & BUDDY_MASK;
+               if (bud == LAST)
+                       handle |= (zhdr->last_chunks << BUDDY_SHIFT);
+       }
        return handle;
 }
 
@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
        return (struct z3fold_header *)(handle & PAGE_MASK);
 }
 
+/* only for LAST bud, returns zero otherwise */
+static unsigned short handle_to_chunks(unsigned long handle)
+{
+       return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
+}
+
 /*
  * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
  *  but that doesn't matter, because the masking will result in the
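
Rough picture of the new handle layout: the z3fold header is page-aligned, so the low PAGE_SHIFT bits of the handle are free; bits [1:0] keep the buddy id and, for the LAST buddy only, the size in chunks is stored above BUDDY_SHIFT, presumably so z3fold_map() no longer depends on zhdr->last_chunks, which can change underneath it. A round-trip sketch using the helpers from this patch:

static void handle_roundtrip_sketch(struct z3fold_header *zhdr)
{
	unsigned long h = encode_handle(zhdr, LAST);

	WARN_ON(handle_to_z3fold_header(h) != zhdr);	   /* h & PAGE_MASK */
	WARN_ON(handle_to_chunks(h) != zhdr->last_chunks); /* bits above 2 */
}
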
@@ -720,37 +730,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
        page = virt_to_page(zhdr);
 
        if (test_bit(PAGE_HEADLESS, &page->private)) {
-               /* HEADLESS page stored */
-               bud = HEADLESS;
-       } else {
-               z3fold_page_lock(zhdr);
-               bud = handle_to_buddy(handle);
-
-               switch (bud) {
-               case FIRST:
-                       zhdr->first_chunks = 0;
-                       break;
-               case MIDDLE:
-                       zhdr->middle_chunks = 0;
-                       zhdr->start_middle = 0;
-                       break;
-               case LAST:
-                       zhdr->last_chunks = 0;
-                       break;
-               default:
-                       pr_err("%s: unknown bud %d\n", __func__, bud);
-                       WARN_ON(1);
-                       z3fold_page_unlock(zhdr);
-                       return;
+               /* if a headless page is under reclaim, just leave.
+                * NB: we use test_and_set_bit for a reason: if the bit
+                * has not been set before, we release this page
+                * immediately so we don't care about its value any more.
+                */
+               if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+                       spin_lock(&pool->lock);
+                       list_del(&page->lru);
+                       spin_unlock(&pool->lock);
+                       free_z3fold_page(page);
+                       atomic64_dec(&pool->pages_nr);
                }
+               return;
        }
 
-       if (bud == HEADLESS) {
-               spin_lock(&pool->lock);
-               list_del(&page->lru);
-               spin_unlock(&pool->lock);
-               free_z3fold_page(page);
-               atomic64_dec(&pool->pages_nr);
+       /* Non-headless case */
+       z3fold_page_lock(zhdr);
+       bud = handle_to_buddy(handle);
+
+       switch (bud) {
+       case FIRST:
+               zhdr->first_chunks = 0;
+               break;
+       case MIDDLE:
+               zhdr->middle_chunks = 0;
+               break;
+       case LAST:
+               zhdr->last_chunks = 0;
+               break;
+       default:
+               pr_err("%s: unknown bud %d\n", __func__, bud);
+               WARN_ON(1);
+               z3fold_page_unlock(zhdr);
                return;
        }
 
@@ -758,7 +770,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
-       if (test_bit(UNDER_RECLAIM, &page->private)) {
+       if (test_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
@@ -836,20 +848,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);
+
+                       /* this bit could have been set by free, in which case
+                        * we pass over to the next page in the pool.
+                        */
+                       if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                               continue;
+
+                       zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
-                               /* candidate found */
                                break;
 
-                       zhdr = page_address(page);
-                       if (!z3fold_page_trylock(zhdr))
+                       if (!z3fold_page_trylock(zhdr)) {
+                               zhdr = NULL;
                                continue; /* can't evict at this point */
+                       }
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
-                       set_bit(UNDER_RECLAIM, &page->private);
                        break;
                }
 
+               if (!zhdr)
+                       break;
+
                list_del_init(&page->lru);
                spin_unlock(&pool->lock);
 
@@ -898,6 +920,7 @@ next:
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                free_z3fold_page(page);
+                               atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        spin_lock(&pool->lock);
@@ -905,7 +928,7 @@ next:
                        spin_unlock(&pool->lock);
                } else {
                        z3fold_page_lock(zhdr);
-                       clear_bit(UNDER_RECLAIM, &page->private);
+                       clear_bit(PAGE_CLAIMED, &page->private);
                        if (kref_put(&zhdr->refcount,
                                        release_z3fold_page_locked)) {
                                atomic64_dec(&pool->pages_nr);
@@ -964,7 +987,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
-               addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+               addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
index 77d43ae2a7bbe1267f8430d5c35637d1984f463c..0ffcbdd55fa9ee545c807f2ed3fc178830e3075a 100644 (file)
@@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
                }
 
                skb = next;
-               if (netif_xmit_stopped(txq) && skb) {
+               if (netif_tx_queue_stopped(txq) && skb) {
                        rc = NETDEV_TX_BUSY;
                        break;
                }
index 676f3ad629f95625422aa55f0f54157001ac477c..588f475019d47c9d6bae8883acebab48aaf63b48 100644 (file)
@@ -1166,8 +1166,8 @@ ip_proto_again:
                break;
        }
 
-       if (dissector_uses_key(flow_dissector,
-                              FLOW_DISSECTOR_KEY_PORTS)) {
+       if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+           !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
                key_ports = skb_flow_dissector_target(flow_dissector,
                                                      FLOW_DISSECTOR_KEY_PORTS,
                                                      target_container);
index 5da9552b186bc853904f7c85bbf872925463896c..2b9fdbc43205f3d8cf826b2074493aa5e72401fb 100644 (file)
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
 
                                read_lock_bh(&idev->lock);
                                list_for_each_entry(ifp, &idev->addr_list, if_list) {
-                                       if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+                                       if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+                                           !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
                                                continue;
                                        np->local_ip.in6 = ifp->addr;
                                        err = 0;
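
The doubled negation turns each masked test into a strict 0/1 so that != behaves as a logical XOR: a candidate source address is now skipped unless its link-local-ness matches that of the remote address. The idiom in isolation:

static bool scope_differs(int addr_type_a, int addr_type_b)
{
	/* !! folds any non-zero mask result to 1 */
	return !!(addr_type_a & IPV6_ADDR_LINKLOCAL) !=
	       !!(addr_type_b & IPV6_ADDR_LINKLOCAL);
}
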
index e01274bd5e3e21addd346a887e24e2c709ef44c9..33d9227a8b8077a8cf6edbcaaa9f5b92d4fee48e 100644 (file)
@@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
                        cb->seq = 0;
                }
                ret = dumpit(skb, cb);
-               if (ret < 0)
+               if (ret)
                        break;
        }
        cb->family = idx;
index 946de0e24c876bbbe63de71b5c7cef91cb967708..b4ee5c8b928f07879b3c7ed08ed5e9b67b08fcb4 100644 (file)
@@ -4944,6 +4944,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
  *
  * This is a helper to do that correctly considering GSO_BY_FRAGS.
  *
+ * @skb: GSO skb
+ *
  * @seg_len: The segmented length (from skb_gso_*_seglen). In the
  *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
  *
index 6fcc4bc07d19bd929648f03b136225a69f2eddfc..080a880a1761b8e0efafaddf0ddac5bb87c64f88 100644 (file)
@@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
 
 #ifdef CONFIG_INET
        if (family == AF_INET &&
+           protocol != IPPROTO_RAW &&
            !rcu_access_pointer(inet_protos[protocol]))
                return -ENOENT;
 #endif
index bcb11f3a27c0c34115af05034a5a20f57842eb0a..760a9e52e02b91b36af323c92f7027e150858f88 100644 (file)
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-                                               void *arg)
+                                               void *arg,
+                                               struct inet_frag_queue **prev)
 {
        struct inet_frags *f = nf->f;
        struct inet_frag_queue *q;
-       int err;
 
        q = inet_frag_alloc(nf, f, arg);
-       if (!q)
+       if (!q) {
+               *prev = ERR_PTR(-ENOMEM);
                return NULL;
-
+       }
        mod_timer(&q->timer, jiffies + nf->timeout);
 
-       err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
-                                    f->rhash_params);
-       if (err < 0) {
+       *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+                                                &q->node, f->rhash_params);
+       if (*prev) {
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
-       struct inet_frag_queue *fq;
+       struct inet_frag_queue *fq = NULL, *prev;
 
        if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
                return NULL;
 
        rcu_read_lock();
 
-       fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
-       if (fq) {
+       prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+       if (!prev)
+               fq = inet_frag_create(nf, key, &prev);
+       if (prev && !IS_ERR(prev)) {
+               fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
-               rcu_read_unlock();
-               return fq;
        }
        rcu_read_unlock();
-
-       return inet_frag_create(nf, key);
+       return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
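
The inet_frag rewrite above closes a window where two CPUs could both miss the lookup and both insert a fresh queue for the same key: rhashtable_lookup_get_insert_key() makes the insert atomic with a lookup and hands the loser the entry that won. A userspace sketch of the find-or-create pattern under those semantics; a mutex and a linked list stand in for the rhashtable, and lookup and create are folded into one path:

    #include <pthread.h>
    #include <stdlib.h>

    struct frag_queue {
        struct frag_queue *next;
        int key;
        int refcnt;
    };

    static struct frag_queue *table;    /* stand-in for the rhashtable */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Insert q unless an entry with the same key exists; on conflict return
     * the existing entry instead, mirroring what
     * rhashtable_lookup_get_insert_key() reports back.  Returns NULL when
     * q was inserted cleanly. */
    static struct frag_queue *lookup_get_insert(struct frag_queue *q)
    {
        struct frag_queue *cur, *prev = NULL;

        pthread_mutex_lock(&table_lock);
        for (cur = table; cur; cur = cur->next) {
            if (cur->key == q->key) {
                prev = cur;
                break;
            }
        }
        if (!prev) {
            q->next = table;
            table = q;
        } else {
            prev->refcnt++;   /* the kernel uses refcount_inc_not_zero() */
        }
        pthread_mutex_unlock(&table_lock);
        return prev;
    }

    /* Find-or-create: creation can "fail" by discovering the queue a
     * concurrent caller inserted first, in which case the loser frees its
     * copy and adopts the winner's. */
    struct frag_queue *frag_find(int key)
    {
        struct frag_queue *q, *prev;

        q = calloc(1, sizeof(*q));
        if (!q)
            return NULL;
        q->key = key;
        q->refcnt = 1;

        prev = lookup_get_insert(q);
        if (prev) {
            free(q);
            return prev;
        }
        return q;
    }

    int main(void)
    {
        struct frag_queue *a = frag_find(42);
        struct frag_queue *b = frag_find(42);   /* second call adopts a */

        return !(a && a == b && a->refcnt == 2);
    }
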
index 9b0158fa431f2245c0fa7e21d62e3ac01296dc20..d6ee343fdb8647ea96240d017b72aef2f6790299 100644 (file)
@@ -722,10 +722,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
-                       if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
-                               return skb;
-                       if (pskb_trim_rcsum(skb, netoff + len))
-                               return skb;
+                       if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+                               kfree_skb(skb);
+                               return NULL;
+                       }
+                       if (pskb_trim_rcsum(skb, netoff + len)) {
+                               kfree_skb(skb);
+                               return NULL;
+                       }
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(net, skb, user))
                                return NULL;
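
The ip_check_defrag() change above makes the failure paths consume the skb: returning a clone whose header pull or trim failed hands the caller an unusable buffer it may still forward. A tiny sketch of the resulting contract, with hypothetical names: on error, free and return NULL, never a half-processed buffer.

    #include <stdlib.h>

    struct buf { char *data; size_t len; };

    /* Hypothetical pre-validation step.  Post-patch contract: either return
     * a buffer ready for the next stage, or free it and return NULL --
     * never hand back a half-trimmed buffer the caller might still use. */
    struct buf *check_and_trim(struct buf *b, size_t need, size_t keep)
    {
        if (b->len < need) {            /* header isn't even readable */
            free(b->data);
            free(b);
            return NULL;                /* consumed: caller must not touch b */
        }
        if (keep < b->len)
            b->len = keep;              /* drop trailing garbage */
        return b;
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));

        if (!b)
            return 1;
        b->data = calloc(1, 64);
        b->len = 64;
        b = check_and_trim(b, 20, 40);
        return !(b && b->len == 40);
    }
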
index 26c36cccabdc2c8cc95cfd609672d412c493fc42..fffcc130900e518874027562272b1052cf0bdd16 100644 (file)
@@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
                return -ENOPROTOOPT;
 
        err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
            optname < BPFILTER_IPT_SET_MAX)
                err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
        int err;
 
        err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_GET_INFO &&
            optname < BPFILTER_IPT_GET_MAX)
                err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
        err = do_ip_getsockopt(sk, level, optname, optval, optlen,
                MSG_CMSG_COMPAT);
 
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_GET_INFO &&
            optname < BPFILTER_IPT_GET_MAX)
                err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
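
The three hunks above swap `#ifdef CONFIG_BPFILTER` for `#if IS_ENABLED(CONFIG_BPFILTER_UMH)`, keying the sockopt hand-off to the usermode-helper option, which may be built as a module. How IS_ENABLED() evaluates to 1 for both =y and =m can be reproduced standalone; the demo below is simplified from include/linux/kconfig.h, and the CONFIG_DEMO_* defines imitate what Kconfig emits:

    #include <stdio.h>

    /* Kconfig emits "#define CONFIG_FOO 1" for =y and
     * "#define CONFIG_FOO_MODULE 1" for =m.  IS_ENABLED() folds both into
     * one preprocessor-time boolean. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

    #define CONFIG_DEMO_BUILTIN 1           /* as if =y */
    #define CONFIG_DEMO_MOD_MODULE 1        /* as if =m */

    int main(void)
    {
        printf("builtin: %d\n", IS_ENABLED(CONFIG_DEMO_BUILTIN));   /* 1 */
        printf("module:  %d\n", IS_ENABLED(CONFIG_DEMO_MOD));       /* 1 */
        printf("off:     %d\n", IS_ENABLED(CONFIG_DEMO_OFF));       /* 0 */
        return 0;
    }

A bare `#ifdef` cannot make the =m distinction at all, which is why the old test fired even in configurations where the bpfilter sockopt helpers did not exist.
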
index 3f4d61017a6947c9dfb5cd1a38e5a25f1665928f..f0cd291034f0fa8ece55acd0fccf79e02629c98a 100644 (file)
@@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
        err = ip6_flowlabel_init();
        if (err)
                goto ip6_flowlabel_fail;
+       err = ipv6_anycast_init();
+       if (err)
+               goto ipv6_anycast_fail;
        err = addrconf_init();
        if (err)
                goto addrconf_fail;
@@ -1091,6 +1094,8 @@ ipv6_frag_fail:
 ipv6_exthdrs_fail:
        addrconf_cleanup();
 addrconf_fail:
+       ipv6_anycast_cleanup();
+ipv6_anycast_fail:
        ip6_flowlabel_cleanup();
 ip6_flowlabel_fail:
        ndisc_late_cleanup();
index 4e0ff7031edd55ce6dbb3f2c62e22b9040cc7fec..94999058e11029b637b6ab8201f8706599e49284 100644 (file)
 
 #include <net/checksum.h>
 
+#define IN6_ADDR_HSIZE_SHIFT   8
+#define IN6_ADDR_HSIZE         BIT(IN6_ADDR_HSIZE_SHIFT)
+/*     anycast address hash table
+ */
+static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(acaddr_hash_lock);
+
 static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
 
+static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+{
+       u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+
+       return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
+}
+
 /*
  *     socket join an anycast group
  */
@@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
        rtnl_unlock();
 }
 
+static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
+{
+       unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
+
+       spin_lock(&acaddr_hash_lock);
+       hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
+       spin_unlock(&acaddr_hash_lock);
+}
+
+static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
+{
+       spin_lock(&acaddr_hash_lock);
+       hlist_del_init_rcu(&aca->aca_addr_lst);
+       spin_unlock(&acaddr_hash_lock);
+}
+
 static void aca_get(struct ifacaddr6 *aca)
 {
        refcount_inc(&aca->aca_refcnt);
 }
 
+static void aca_free_rcu(struct rcu_head *h)
+{
+       struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
+
+       fib6_info_release(aca->aca_rt);
+       kfree(aca);
+}
+
 static void aca_put(struct ifacaddr6 *ac)
 {
        if (refcount_dec_and_test(&ac->aca_refcnt)) {
-               fib6_info_release(ac->aca_rt);
-               kfree(ac);
+               call_rcu(&ac->rcu, aca_free_rcu);
        }
 }
 
@@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
        aca->aca_addr = *addr;
        fib6_info_hold(f6i);
        aca->aca_rt = f6i;
+       INIT_HLIST_NODE(&aca->aca_addr_lst);
        aca->aca_users = 1;
        /* aca_tstamp should be updated upon changes */
        aca->aca_cstamp = aca->aca_tstamp = jiffies;
@@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
        aca_get(aca);
        write_unlock_bh(&idev->lock);
 
+       ipv6_add_acaddr_hash(net, aca);
+
        ip6_ins_rt(net, f6i);
 
        addrconf_join_solict(idev->dev, &aca->aca_addr);
@@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
        else
                idev->ac_list = aca->aca_next;
        write_unlock_bh(&idev->lock);
+       ipv6_del_acaddr_hash(aca);
        addrconf_leave_solict(idev, &aca->aca_addr);
 
        ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
                idev->ac_list = aca->aca_next;
                write_unlock_bh(&idev->lock);
 
+               ipv6_del_acaddr_hash(aca);
+
                addrconf_leave_solict(idev, &aca->aca_addr);
 
                ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr)
 {
+       unsigned int hash = inet6_acaddr_hash(net, addr);
+       struct net_device *nh_dev;
+       struct ifacaddr6 *aca;
        bool found = false;
 
        rcu_read_lock();
        if (dev)
                found = ipv6_chk_acast_dev(dev, addr);
        else
-               for_each_netdev_rcu(net, dev)
-                       if (ipv6_chk_acast_dev(dev, addr)) {
+               hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
+                                        aca_addr_lst) {
+                       nh_dev = fib6_info_nh_dev(aca->aca_rt);
+                       if (!nh_dev || !net_eq(dev_net(nh_dev), net))
+                               continue;
+                       if (ipv6_addr_equal(&aca->aca_addr, addr)) {
                                found = true;
                                break;
                        }
+               }
        rcu_read_unlock();
        return found;
 }
@@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
        remove_proc_entry("anycast6", net->proc_net);
 }
 #endif
+
+/*     Init / cleanup code
+ */
+int __init ipv6_anycast_init(void)
+{
+       int i;
+
+       for (i = 0; i < IN6_ADDR_HSIZE; i++)
+               INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
+       return 0;
+}
+
+void ipv6_anycast_cleanup(void)
+{
+       int i;
+
+       spin_lock(&acaddr_hash_lock);
+       for (i = 0; i < IN6_ADDR_HSIZE; i++)
+               WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
+       spin_unlock(&acaddr_hash_lock);
+}
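
The anycast patch above replaces a for_each_netdev_rcu() scan per incoming packet with a global 256-bucket hash, keyed by the folded address mixed with a per-namespace salt, and frees entries through call_rcu() so lockless readers never follow a stale pointer. A userspace sketch of the table shape and hash fold, with a constant standing in for net_hash_mix() and plain singly linked buckets standing in for RCU hlists:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HSIZE_SHIFT 8
    #define HSIZE (1u << HSIZE_SHIFT)          /* 256 buckets, as in the patch */

    struct in6_addr { uint32_t s6_addr32[4]; };

    struct aca {
        struct aca *next;                      /* hlist stand-in */
        struct in6_addr addr;
    };

    static struct aca *buckets[HSIZE];

    /* XOR-fold the 128-bit address to 32 bits, mix in a per-namespace salt
     * (a constant here; the kernel uses net_hash_mix(net)), then take the
     * top bits of a multiplicative hash -- hash_32()'s approach. */
    static unsigned int acaddr_hash(const struct in6_addr *a)
    {
        uint32_t v = a->s6_addr32[0] ^ a->s6_addr32[1] ^
                     a->s6_addr32[2] ^ a->s6_addr32[3];

        v ^= 0x9e3779b9u;                      /* stand-in for net_hash_mix() */
        return (v * 0x61C88647u) >> (32 - HSIZE_SHIFT);
    }

    static void aca_add(struct aca *e)
    {
        unsigned int h = acaddr_hash(&e->addr);

        e->next = buckets[h];
        buckets[h] = e;                        /* kernel: hlist_add_head_rcu() */
    }

    static int aca_present(const struct in6_addr *a)
    {
        const struct aca *e;

        for (e = buckets[acaddr_hash(a)]; e; e = e->next)
            if (!memcmp(&e->addr, a, sizeof(*a)))
                return 1;
        return 0;
    }

    int main(void)
    {
        struct aca e = { .addr = { { 0xff020000, 0, 0, 0x1b000000 } } };

        aca_add(&e);
        printf("hit: %d\n", aca_present(&e.addr));
        return 0;
    }
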
index 1b8bc008b53b642adef3ba9335563d430a99c1a9..ae3786132c236b2bcde4f8f3008fceb2d6bc1cdd 100644 (file)
@@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 
        /* fib entries are never clones */
        if (arg.filter.flags & RTM_F_CLONED)
-               return skb->len;
+               goto out;
 
        w = (void *)cb->args[2];
        if (!w) {
@@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                tb = fib6_get_table(net, arg.filter.table_id);
                if (!tb) {
                        if (arg.filter.dump_all_families)
-                               return skb->len;
+                               goto out;
 
                        NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
                        return -ENOENT;
index b8ac369f98ad877f6cf9114b1dbcfcb6c4c95ec5..d219979c3e529c32e029865debc788109d05ad83 100644 (file)
@@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
         */
        ret = -EINPROGRESS;
        if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len &&
-           nf_ct_frag6_reasm(fq, skb, dev))
-               ret = 0;
-       else
+           fq->q.meat == fq->q.len) {
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               if (nf_ct_frag6_reasm(fq, skb, dev))
+                       ret = 0;
+               skb->_skb_refdst = orefdst;
+       } else {
                skb_dst_drop(skb);
+       }
 
 out_unlock:
        spin_unlock_bh(&fq->q.lock);
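
The nf_ct_frag6_gather() hunk above parks skb->_skb_refdst in a local and zeroes it across reassembly, so the route reference attached to the fragment that completes the queue survives the call and is restored afterwards. The stash-and-restore shape, sketched with a hypothetical processing step:

    #include <stdint.h>

    struct pkt { uintptr_t refdst; /* refcounted route; low bits are flags */ };

    /* Hypothetical stage that would otherwise drop or overwrite the
     * reference held in pkt->refdst. */
    static int process(struct pkt *p) { (void)p; return 1; }

    /* Hide the caller's reference from a callee that isn't allowed to
     * consume it, then put it back -- the shape of the _skb_refdst
     * handling added to nf_ct_frag6_gather(). */
    static int process_keep_dst(struct pkt *p)
    {
        uintptr_t orefdst = p->refdst;
        int ret;

        p->refdst = 0;          /* callee sees "no route attached" */
        ret = process(p);
        p->refdst = orefdst;    /* caller's reference survives either way */
        return ret;
    }

    int main(void)
    {
        struct pkt p = { .refdst = 0x1000 };

        return !process_keep_dst(&p);
    }
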
index bc4bd247bb7d42767eb860c05fb4b0b40408304b..1577f2f76060dcd816f94078412f52943568ce40 100644 (file)
@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
 MODULE_DESCRIPTION("core IP set support");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 
-/* When the nfnl mutex is held: */
+/* When the nfnl mutex or ip_set_ref_lock is held: */
 #define ip_set_dereference(p)          \
-       rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+       rcu_dereference_protected(p,    \
+               lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+               lockdep_is_held(&ip_set_ref_lock))
 #define ip_set(inst, id)               \
        ip_set_dereference((inst)->ip_set_list)[id]
+#define ip_set_ref_netlink(inst,id)    \
+       rcu_dereference_raw((inst)->ip_set_list)[id]
 
 /* The set types are implemented in modules and registered set types
  * can be found in ip_set_type_list. Adding/deleting types is
@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
 /* Get the name of a set behind a set index.
- * We assume the set is referenced, so it does exist and
- * can't be destroyed. The set cannot be renamed due to
- * the referencing either.
- *
+ * Set itself is protected by RCU, but its name isn't: to protect against
+ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
+ * name.
  */
-const char *
-ip_set_name_byindex(struct net *net, ip_set_id_t index)
+void
+ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
 {
-       const struct ip_set *set = ip_set_rcu_get(net, index);
+       struct ip_set *set = ip_set_rcu_get(net, index);
 
        BUG_ON(!set);
-       BUG_ON(set->ref == 0);
 
-       /* Referenced, so it's safe */
-       return set->name;
+       read_lock_bh(&ip_set_ref_lock);
+       strncpy(name, set->name, IPSET_MAXNAMELEN);
+       read_unlock_bh(&ip_set_ref_lock);
 }
 EXPORT_SYMBOL_GPL(ip_set_name_byindex);
 
@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
                        /* Wraparound */
                        goto cleanup;
 
-               list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
+               list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
                if (!list)
                        goto cleanup;
                /* nfnl mutex is held, both lists are valid */
@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
                /* Use new list */
                index = inst->ip_set_max;
                inst->ip_set_max = i;
-               kfree(tmp);
+               kvfree(tmp);
                ret = 0;
        } else if (ret) {
                goto cleanup;
@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
        if (!set)
                return -ENOENT;
 
-       read_lock_bh(&ip_set_ref_lock);
+       write_lock_bh(&ip_set_ref_lock);
        if (set->ref != 0) {
                ret = -IPSET_ERR_REFERENCED;
                goto out;
@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
        strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
 out:
-       read_unlock_bh(&ip_set_ref_lock);
+       write_unlock_bh(&ip_set_ref_lock);
        return ret;
 }
 
@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
                struct ip_set_net *inst =
                        (struct ip_set_net *)cb->args[IPSET_CB_NET];
                ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
-               struct ip_set *set = ip_set(inst, index);
+               struct ip_set *set = ip_set_ref_netlink(inst, index);
 
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
@@ -1441,7 +1444,7 @@ next_set:
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[IPSET_CB_ARG0]) {
-               set = ip_set(inst, index);
+               set = ip_set_ref_netlink(inst, index);
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
                pr_debug("release set %s\n", set->name);
@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
        if (inst->ip_set_max >= IPSET_INVALID_ID)
                inst->ip_set_max = IPSET_INVALID_ID - 1;
 
-       list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
+       list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
        if (!list)
                return -ENOMEM;
        inst->is_deleted = false;
@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
                }
        }
        nfnl_unlock(NFNL_SUBSYS_IPSET);
-       kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+       kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
 
 static struct pernet_operations ip_set_net_ops = {
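
The ipset changes above stop ip_set_name_byindex() from returning a pointer into the set: names can now change under readers that hold only RCU, so readers copy the name into a caller-supplied buffer under ip_set_ref_lock, and ip_set_rename() is upgraded to the write side of that lock. The same snapshot-copy discipline, sketched with a pthread rwlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define MAXNAMELEN 32

    static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
    static char set_name[MAXNAMELEN] = "clients";

    /* Readers never keep a pointer into set_name: they snapshot it while
     * holding the lock as readers, so a concurrent rename can never be
     * observed half-written. */
    void name_byindex(char *out)
    {
        pthread_rwlock_rdlock(&ref_lock);
        strncpy(out, set_name, MAXNAMELEN);
        out[MAXNAMELEN - 1] = '\0';
        pthread_rwlock_unlock(&ref_lock);
    }

    /* Writers take the same lock exclusively, mirroring the switch from
     * read_lock_bh() to write_lock_bh() in ip_set_rename(). */
    void rename_set(const char *name2)
    {
        pthread_rwlock_wrlock(&ref_lock);
        strncpy(set_name, name2, MAXNAMELEN);
        set_name[MAXNAMELEN - 1] = '\0';
        pthread_rwlock_unlock(&ref_lock);
    }

    int main(void)
    {
        char snap[MAXNAMELEN];

        rename_set("clients-v2");
        name_byindex(snap);
        printf("%s\n", snap);
        return 0;
    }
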
index d391485a6acdc2ff3523d5b7d39c20ab4a8add80..613e18e720a44777754428666b9f021de952de9a 100644 (file)
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+               if (e.cidr[0] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
        if (tb[IPSET_ATTR_CIDR2]) {
                e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-               if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+               if (e.cidr[1] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+               if (e.cidr[0] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
        if (tb[IPSET_ATTR_CIDR2]) {
                e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-               if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+               if (e.cidr[1] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
index 072a658fde047c5d9d59ac08e796b25759cc68a1..4eef55da0878e299d0bb912fa7ea69d3d4e91441 100644 (file)
@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
 {
        struct set_elem *e = container_of(rcu, struct set_elem, rcu);
        struct ip_set *set = e->set;
-       struct list_set *map = set->data;
 
-       ip_set_put_byindex(map->net, e->id);
        ip_set_ext_destroy(set, e);
        kfree(e);
 }
@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
 static inline void
 list_set_del(struct ip_set *set, struct set_elem *e)
 {
+       struct list_set *map = set->data;
+
        set->elements--;
        list_del_rcu(&e->list);
+       ip_set_put_byindex(map->net, e->id);
        call_rcu(&e->rcu, __list_set_del_rcu);
 }
 
 static inline void
-list_set_replace(struct set_elem *e, struct set_elem *old)
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
 {
+       struct list_set *map = set->data;
+
        list_replace_rcu(&old->list, &e->list);
+       ip_set_put_byindex(map->net, old->id);
        call_rcu(&old->rcu, __list_set_del_rcu);
 }
 
@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        INIT_LIST_HEAD(&e->list);
        list_set_init_extensions(set, ext, e);
        if (n)
-               list_set_replace(e, n);
+               list_set_replace(set, e, n);
        else if (next)
                list_add_tail_rcu(&e->list, &next->list);
        else if (prev)
@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
        const struct list_set *map = set->data;
        struct nlattr *atd, *nested;
        u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+       char name[IPSET_MAXNAMELEN];
        struct set_elem *e;
        int ret = 0;
 
@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested)
                        goto nla_put_failure;
-               if (nla_put_string(skb, IPSET_ATTR_NAME,
-                                  ip_set_name_byindex(map->net, e->id)))
+               ip_set_name_byindex(map->net, e->id, name);
+               if (nla_put_string(skb, IPSET_ATTR_NAME, name))
                        goto nla_put_failure;
                if (ip_set_put_extensions(skb, set, e, true))
                        goto nla_put_failure;
index ca1168d67fac6c0fc1eaef5dfeb1db8428e51db3..e92e749aff53e46c60718b55593e72d70838e9be 100644 (file)
@@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
        return drops;
 }
 
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
 {
-       unsigned int i;
+       unsigned int i, bucket;
 
        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
-               unsigned int hash, hsize, drops;
+               unsigned int hsize, drops;
 
                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
-               hash = reciprocal_scale(_hash++, hsize);
+               if (!i)
+                       bucket = reciprocal_scale(hash, hsize);
+               else
+                       bucket = (bucket + 1) % hsize;
 
-               drops = early_drop_list(net, &ct_hash[hash]);
+               drops = early_drop_list(net, &ct_hash[bucket]);
                rcu_read_unlock();
 
                if (drops) {
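
The early_drop() fix above scales the starting hash into a bucket once and then walks consecutive buckets, where the old code rescaled an incremented seed each pass, scattering the probes and possibly revisiting buckets while skipping neighbours. A sketch with the kernel's reciprocal_scale() definition and an illustrative eviction range:

    #include <stdint.h>
    #include <stdio.h>

    /* reciprocal_scale(): map a 32-bit value into [0, ep_ro) without a
     * division -- the same definition as the kernel helper. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    #define EVICTION_RANGE 8

    /* Scale the starting hash once, then probe EVICTION_RANGE *consecutive*
     * buckets.  The pre-patch loop rescaled (hash + i) each pass, which
     * jumps around the table instead of scanning a contiguous window. */
    static void scan_buckets(uint32_t hash, uint32_t hsize)
    {
        uint32_t i, bucket = 0;

        for (i = 0; i < EVICTION_RANGE; i++) {
            if (!i)
                bucket = reciprocal_scale(hash, hsize);
            else
                bucket = (bucket + 1) % hsize;
            printf("probe bucket %u\n", (unsigned)bucket);
        }
    }

    int main(void)
    {
        scan_buckets(0xdeadbeef, 16384);
        return 0;
    }
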
index 171e9e122e5f1e8b8840e41013d86246ba8025b9..023c1445bc3960de8c3d2350d9fb5c8d743e920f 100644 (file)
@@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
        },
 };
 
-static inline struct nf_dccp_net *dccp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.dccp;
-}
-
 static noinline bool
 dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
         const struct dccp_hdr *dh)
@@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
        switch (state) {
        default:
-               dn = dccp_pernet(net);
+               dn = nf_dccp_pernet(net);
                if (dn->dccp_loose == 0) {
                        msg = "not picking up existing connection ";
                        goto out_invalid;
@@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
 
        timeouts = nf_ct_timeout_lookup(ct);
        if (!timeouts)
-               timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
+               timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
        return NF_ACCEPT;
@@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
-       struct nf_dccp_net *dn = dccp_pernet(net);
+       struct nf_dccp_net *dn = nf_dccp_pernet(net);
        unsigned int *timeouts = data;
        int i;
 
@@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
 
 static int dccp_init_net(struct net *net)
 {
-       struct nf_dccp_net *dn = dccp_pernet(net);
+       struct nf_dccp_net *dn = nf_dccp_pernet(net);
        struct nf_proto_net *pn = &dn->pn;
 
        if (!pn->users) {
index e10e867e0b55f3203e8a50d4ac7c884201ac1186..5da19d5fbc767f2ca8f22ac4eba09aebb6c59fda 100644 (file)
@@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
        }
 }
 
-static inline struct nf_generic_net *generic_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.generic;
-}
-
 static bool generic_pkt_to_tuple(const struct sk_buff *skb,
                                 unsigned int dataoff,
                                 struct net *net, struct nf_conntrack_tuple *tuple)
@@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
        }
 
        if (!timeout)
-               timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+               timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
@@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
 static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
                                         struct net *net, void *data)
 {
-       struct nf_generic_net *gn = generic_pernet(net);
+       struct nf_generic_net *gn = nf_generic_pernet(net);
        unsigned int *timeout = data;
 
        if (!timeout)
@@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int generic_init_net(struct net *net)
 {
-       struct nf_generic_net *gn = generic_pernet(net);
+       struct nf_generic_net *gn = nf_generic_pernet(net);
        struct nf_proto_net *pn = &gn->pn;
 
        gn->timeout = nf_ct_generic_timeout;
index 3598520bd19b7b76dbd91bb42e4b8b91713abf2c..de64d8a5fdfd137aca48a9e62a143a01f63bec07 100644 (file)
 
 static const unsigned int nf_ct_icmp_timeout = 30*HZ;
 
-static inline struct nf_icmp_net *icmp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.icmp;
-}
-
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
                              struct net *net, struct nf_conntrack_tuple *tuple)
 {
@@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
        }
 
        if (!timeout)
-               timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+               timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
@@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
        unsigned int *timeout = data;
-       struct nf_icmp_net *in = icmp_pernet(net);
+       struct nf_icmp_net *in = nf_icmp_pernet(net);
 
        if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
                if (!timeout)
@@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int icmp_init_net(struct net *net)
 {
-       struct nf_icmp_net *in = icmp_pernet(net);
+       struct nf_icmp_net *in = nf_icmp_pernet(net);
        struct nf_proto_net *pn = &in->pn;
 
        in->timeout = nf_ct_icmp_timeout;
index 378618feed5da7df50e09c8ec4f72618953306b0..a15eefb8e3173c5d89268bd7f2a6c076ff787b1c 100644 (file)
 
 static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
 
-static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.icmpv6;
-}
-
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
                                unsigned int dataoff,
                                struct net *net,
@@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
 
 static unsigned int *icmpv6_get_timeouts(struct net *net)
 {
-       return &icmpv6_pernet(net)->timeout;
+       return &nf_icmpv6_pernet(net)->timeout;
 }
 
 /* Returns verdict for packet, or -1 for invalid. */
@@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
                                        struct net *net, void *data)
 {
        unsigned int *timeout = data;
-       struct nf_icmp_net *in = icmpv6_pernet(net);
+       struct nf_icmp_net *in = nf_icmpv6_pernet(net);
 
        if (!timeout)
                timeout = icmpv6_get_timeouts(net);
@@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int icmpv6_init_net(struct net *net)
 {
-       struct nf_icmp_net *in = icmpv6_pernet(net);
+       struct nf_icmp_net *in = nf_icmpv6_pernet(net);
        struct nf_proto_net *pn = &in->pn;
 
        in->timeout = nf_ct_icmpv6_timeout;
index 3d719d3eb9a38c7709b8d224facdad8820ebded4..d53e3e78f6052a1f8d8fde973ee03b0763470b30 100644 (file)
@@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        }
 };
 
-static inline struct nf_sctp_net *sctp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.sctp;
-}
-
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
 /* Print out the private part of the conntrack. */
 static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
 
        timeouts = nf_ct_timeout_lookup(ct);
        if (!timeouts)
-               timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+               timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
@@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
        unsigned int *timeouts = data;
-       struct nf_sctp_net *sn = sctp_pernet(net);
+       struct nf_sctp_net *sn = nf_sctp_pernet(net);
        int i;
 
        /* set default SCTP timeouts. */
@@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int sctp_init_net(struct net *net)
 {
-       struct nf_sctp_net *sn = sctp_pernet(net);
+       struct nf_sctp_net *sn = nf_sctp_pernet(net);
        struct nf_proto_net *pn = &sn->pn;
 
        if (!pn->users) {
index 1bcf9984d45e8601646cb2b99dc5f3113a5c8b0a..4dcbd51a8e97f04ad8056374ed892887d2f0798e 100644 (file)
@@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
        }
 };
 
-static inline struct nf_tcp_net *tcp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.tcp;
-}
-
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                          const struct tcphdr *tcph)
 {
        struct net *net = nf_ct_net(ct);
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct ip_ct_tcp_state *sender = &state->seen[dir];
        struct ip_ct_tcp_state *receiver = &state->seen[!dir];
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
 {
        enum tcp_conntrack new_state;
        struct net *net = nf_ct_net(ct);
-       const struct nf_tcp_net *tn = tcp_pernet(net);
+       const struct nf_tcp_net *tn = nf_tcp_pernet(net);
        const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
        const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
 
@@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
                      const struct nf_hook_state *state)
 {
        struct net *net = nf_ct_net(ct);
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct nf_conntrack_tuple *tuple;
        enum tcp_conntrack new_state, old_state;
        unsigned int index, *timeouts;
@@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                     struct net *net, void *data)
 {
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        unsigned int *timeouts = data;
        int i;
 
@@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int tcp_init_net(struct net *net)
 {
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct nf_proto_net *pn = &tn->pn;
 
        if (!pn->users) {
index a7aa70370913ce7e8914343270152fb009eb2a63..c879d8d78cfde88a223b961bb203bf7bb48ef1b2 100644 (file)
@@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
        [UDP_CT_REPLIED]        = 180*HZ,
 };
 
-static inline struct nf_udp_net *udp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.udp;
-}
-
 static unsigned int *udp_get_timeouts(struct net *net)
 {
-       return udp_pernet(net)->timeouts;
+       return nf_udp_pernet(net)->timeouts;
 }
 
 static void udp_error_log(const struct sk_buff *skb,
@@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                     struct net *net, void *data)
 {
        unsigned int *timeouts = data;
-       struct nf_udp_net *un = udp_pernet(net);
+       struct nf_udp_net *un = nf_udp_pernet(net);
 
        if (!timeouts)
                timeouts = un->timeouts;
@@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int udp_init_net(struct net *net)
 {
-       struct nf_udp_net *un = udp_pernet(net);
+       struct nf_udp_net *un = nf_udp_pernet(net);
        struct nf_proto_net *pn = &un->pn;
 
        if (!pn->users) {
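
The seven hunks above delete per-file static `*_pernet()` accessors (dccp, generic, icmp, icmpv6, sctp, tcp, udp) in favour of shared nf_*_pernet() inlines, so code outside those files, such as the cttimeout hunk that follows, can reach each protocol's per-namespace timeout state. The shape of such a shared accessor header, with struct layouts invented for the sketch:

    #include <stdio.h>

    /* Hypothetical shared header: one inline per protocol replaces the
     * per-file static copies, so any file including it can reach the
     * per-namespace state. */
    struct nf_tcp_net { unsigned int timeouts[14]; };
    struct nf_udp_net { unsigned int timeouts[2]; };

    struct nf_ct_proto {
        struct nf_tcp_net tcp;
        struct nf_udp_net udp;
    };

    struct net { struct nf_ct_proto ct_proto; };

    static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
    {
        return &net->ct_proto.tcp;
    }

    static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
    {
        return &net->ct_proto.udp;
    }

    int main(void)
    {
        static struct net demo_net;            /* zero-initialised */

        nf_udp_pernet(&demo_net)->timeouts[0] = 30;
        printf("udp timeout: %u\n", nf_udp_pernet(&demo_net)->timeouts[0]);
        return 0;
    }
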
index e7a50af1b3d61a6e12fb74eaa9a9ba02f0a8d22b..a518eb162344e6692e69989cf5ba0cdf03da1333 100644 (file)
@@ -382,7 +382,8 @@ err:
 static int
 cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
                            u32 seq, u32 type, int event, u16 l3num,
-                           const struct nf_conntrack_l4proto *l4proto)
+                           const struct nf_conntrack_l4proto *l4proto,
+                           const unsigned int *timeouts)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
@@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
        if (!nest_parms)
                goto nla_put_failure;
 
-       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
        if (ret < 0)
                goto nla_put_failure;
 
@@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                 struct netlink_ext_ack *extack)
 {
        const struct nf_conntrack_l4proto *l4proto;
+       unsigned int *timeouts = NULL;
        struct sk_buff *skb2;
        int ret, err;
        __u16 l3num;
@@ -442,12 +444,44 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
        l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
        l4proto = nf_ct_l4proto_find_get(l4num);
 
-       /* This protocol is not supported, skip. */
-       if (l4proto->l4proto != l4num) {
-               err = -EOPNOTSUPP;
+       err = -EOPNOTSUPP;
+       if (l4proto->l4proto != l4num)
                goto err;
+
+       switch (l4proto->l4proto) {
+       case IPPROTO_ICMP:
+               timeouts = &nf_icmp_pernet(net)->timeout;
+               break;
+       case IPPROTO_TCP:
+               timeouts = nf_tcp_pernet(net)->timeouts;
+               break;
+       case IPPROTO_UDP:
+               timeouts = nf_udp_pernet(net)->timeouts;
+               break;
+       case IPPROTO_DCCP:
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+               timeouts = nf_dccp_pernet(net)->dccp_timeout;
+#endif
+               break;
+       case IPPROTO_ICMPV6:
+               timeouts = &nf_icmpv6_pernet(net)->timeout;
+               break;
+       case IPPROTO_SCTP:
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+               timeouts = nf_sctp_pernet(net)->timeouts;
+#endif
+               break;
+       case 255:
+               timeouts = &nf_generic_pernet(net)->timeout;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
        }
 
+       if (!timeouts)
+               goto err;
+
        skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb2 == NULL) {
                err = -ENOMEM;
@@ -458,8 +492,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                          nlh->nlmsg_seq,
                                          NFNL_MSG_TYPE(nlh->nlmsg_type),
                                          IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
-                                         l3num,
-                                         l4proto);
+                                         l3num, l4proto, timeouts);
        if (ret <= 0) {
                kfree_skb(skb2);
                err = -ENOMEM;
index 768292eac2a46afe84df3b8a949a70bf77baf478..9d0ede4742240f544bbdff40553b73b8e755c7b4 100644 (file)
@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
        return false;
 }
 
-static int nft_compat_chain_validate_dependency(const char *tablename,
-                                               const struct nft_chain *chain)
+static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
+                                               const char *tablename)
 {
+       enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
+       const struct nft_chain *chain = ctx->chain;
        const struct nft_base_chain *basechain;
 
        if (!tablename ||
@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
                return 0;
 
        basechain = nft_base_chain(chain);
-       if (strcmp(tablename, "nat") == 0 &&
-           basechain->type->type != NFT_CHAIN_T_NAT)
-               return -EINVAL;
+       if (strcmp(tablename, "nat") == 0) {
+               if (ctx->family != NFPROTO_BRIDGE)
+                       type = NFT_CHAIN_T_NAT;
+               if (basechain->type->type != type)
+                       return -EINVAL;
+       }
 
        return 0;
 }
@@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
                if (target->hooks && !(hook_mask & target->hooks))
                        return -EINVAL;
 
-               ret = nft_compat_chain_validate_dependency(target->table,
-                                                          ctx->chain);
+               ret = nft_compat_chain_validate_dependency(ctx, target->table);
                if (ret < 0)
                        return ret;
        }
@@ -590,8 +594,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
                if (match->hooks && !(hook_mask & match->hooks))
                        return -EINVAL;
 
-               ret = nft_compat_chain_validate_dependency(match->table,
-                                                          ctx->chain);
+               ret = nft_compat_chain_validate_dependency(ctx, match->table);
                if (ret < 0)
                        return ret;
        }
index 649d1700ec5ba026307c46596112b6b3fb667255..3cc1b3dc3c3cdb2508cef7825f3bd9c485679fdb 100644 (file)
@@ -24,7 +24,6 @@ struct nft_ng_inc {
        u32                     modulus;
        atomic_t                counter;
        u32                     offset;
-       struct nft_set          *map;
 };
 
 static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
        regs->data[priv->dreg] = nft_ng_inc_gen(priv);
 }
 
-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
-                               struct nft_regs *regs,
-                               const struct nft_pktinfo *pkt)
-{
-       struct nft_ng_inc *priv = nft_expr_priv(expr);
-       const struct nft_set *map = priv->map;
-       const struct nft_set_ext *ext;
-       u32 result;
-       bool found;
-
-       result = nft_ng_inc_gen(priv);
-       found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-
-       if (!found)
-               return;
-
-       nft_data_copy(&regs->data[priv->dreg],
-                     nft_set_ext_data(ext), map->dlen);
-}
-
 static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
        [NFTA_NG_DREG]          = { .type = NLA_U32 },
        [NFTA_NG_MODULUS]       = { .type = NLA_U32 },
        [NFTA_NG_TYPE]          = { .type = NLA_U32 },
        [NFTA_NG_OFFSET]        = { .type = NLA_U32 },
-       [NFTA_NG_SET_NAME]      = { .type = NLA_STRING,
-                                   .len = NFT_SET_MAXNAMELEN - 1 },
-       [NFTA_NG_SET_ID]        = { .type = NLA_U32 },
 };
 
 static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, sizeof(u32));
 }
 
-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr,
-                              const struct nlattr * const tb[])
-{
-       struct nft_ng_inc *priv = nft_expr_priv(expr);
-       u8 genmask = nft_genmask_next(ctx->net);
-
-       nft_ng_inc_init(ctx, expr, tb);
-
-       priv->map = nft_set_lookup_global(ctx->net, ctx->table,
-                                         tb[NFTA_NG_SET_NAME],
-                                         tb[NFTA_NG_SET_ID], genmask);
-
-       return PTR_ERR_OR_ZERO(priv->map);
-}
-
 static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
                       u32 modulus, enum nft_ng_types type, u32 offset)
 {
@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
                           priv->offset);
 }
 
-static int nft_ng_inc_map_dump(struct sk_buff *skb,
-                              const struct nft_expr *expr)
-{
-       const struct nft_ng_inc *priv = nft_expr_priv(expr);
-
-       if (nft_ng_dump(skb, priv->dreg, priv->modulus,
-                       NFT_NG_INCREMENTAL, priv->offset) ||
-           nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-
 struct nft_ng_random {
        enum nft_registers      dreg:8;
        u32                     modulus;
        u32                     offset;
-       struct nft_set          *map;
 };
 
 static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
        regs->data[priv->dreg] = nft_ng_random_gen(priv);
 }
 
-static void nft_ng_random_map_eval(const struct nft_expr *expr,
-                                  struct nft_regs *regs,
-                                  const struct nft_pktinfo *pkt)
-{
-       struct nft_ng_random *priv = nft_expr_priv(expr);
-       const struct nft_set *map = priv->map;
-       const struct nft_set_ext *ext;
-       u32 result;
-       bool found;
-
-       result = nft_ng_random_gen(priv);
-       found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-       if (!found)
-               return;
-
-       nft_data_copy(&regs->data[priv->dreg],
-                     nft_set_ext_data(ext), map->dlen);
-}
-
 static int nft_ng_random_init(const struct nft_ctx *ctx,
                              const struct nft_expr *expr,
                              const struct nlattr * const tb[])
@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, sizeof(u32));
 }
 
-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
-                                 const struct nft_expr *expr,
-                                 const struct nlattr * const tb[])
-{
-       struct nft_ng_random *priv = nft_expr_priv(expr);
-       u8 genmask = nft_genmask_next(ctx->net);
-
-       nft_ng_random_init(ctx, expr, tb);
-       priv->map = nft_set_lookup_global(ctx->net, ctx->table,
-                                         tb[NFTA_NG_SET_NAME],
-                                         tb[NFTA_NG_SET_ID], genmask);
-
-       return PTR_ERR_OR_ZERO(priv->map);
-}
-
 static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_ng_random *priv = nft_expr_priv(expr);
@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
                           priv->offset);
 }
 
-static int nft_ng_random_map_dump(struct sk_buff *skb,
-                                 const struct nft_expr *expr)
-{
-       const struct nft_ng_random *priv = nft_expr_priv(expr);
-
-       if (nft_ng_dump(skb, priv->dreg, priv->modulus,
-                       NFT_NG_RANDOM, priv->offset) ||
-           nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-
 static struct nft_expr_type nft_ng_type;
 static const struct nft_expr_ops nft_ng_inc_ops = {
        .type           = &nft_ng_type,
@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
        .dump           = nft_ng_inc_dump,
 };
 
-static const struct nft_expr_ops nft_ng_inc_map_ops = {
-       .type           = &nft_ng_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
-       .eval           = nft_ng_inc_map_eval,
-       .init           = nft_ng_inc_map_init,
-       .dump           = nft_ng_inc_map_dump,
-};
-
 static const struct nft_expr_ops nft_ng_random_ops = {
        .type           = &nft_ng_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
        .dump           = nft_ng_random_dump,
 };
 
-static const struct nft_expr_ops nft_ng_random_map_ops = {
-       .type           = &nft_ng_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
-       .eval           = nft_ng_random_map_eval,
-       .init           = nft_ng_random_map_init,
-       .dump           = nft_ng_random_map_dump,
-};
-
 static const struct nft_expr_ops *
 nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
 {
@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
 
        switch (type) {
        case NFT_NG_INCREMENTAL:
-               if (tb[NFTA_NG_SET_NAME])
-                       return &nft_ng_inc_map_ops;
                return &nft_ng_inc_ops;
        case NFT_NG_RANDOM:
-               if (tb[NFTA_NG_SET_NAME])
-                       return &nft_ng_random_map_ops;
                return &nft_ng_random_ops;
        }
 
index ca5e5d8c5ef8b91cd61cb039d652f4549c343948..b13618c764ec296377778ee405b9067515ada25a 100644 (file)
@@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
        int err;
        u8 ttl;
 
-       if (nla_get_u8(tb[NFTA_OSF_TTL])) {
+       if (tb[NFTA_OSF_TTL]) {
                ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
                if (ttl > 2)
                        return -EINVAL;
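
The one-line nft_osf fix above is a NULL-pointer-dereference repair: the old condition parsed the attribute with nla_get_u8() before checking that userspace had supplied it. The check-then-parse idiom for optional netlink attributes, with nla_get_u8() reduced to a stub:

    #include <stdint.h>
    #include <stddef.h>

    struct nlattr { uint16_t len; uint16_t type; unsigned char data[]; };

    /* Stub: the real helper reads the payload via nla_data(). */
    static uint8_t nla_get_u8(const struct nlattr *nla)
    {
        return nla->data[0];
    }

    /* Optional attribute: test the pointer, *then* parse.  Reversing the
     * two steps -- as the pre-patch code did -- dereferences NULL whenever
     * the attribute is absent. */
    static int parse_ttl(const struct nlattr *ttl_attr, uint8_t *ttl_out)
    {
        if (!ttl_attr)
            return 0;                 /* absent: keep the default */
        *ttl_out = nla_get_u8(ttl_attr);
        if (*ttl_out > 2)
            return -1;                /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        uint8_t ttl = 0;

        return parse_ttl(NULL, &ttl);  /* safe: attribute omitted */
    }
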
index c6acfc2d9c8414d36173e3cf09f94ea64f0d7515..eb4cbd244c3d311e2630a4c4cae868f0343c30f0 100644 (file)
@@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
        schedule_work(&timer->work);
 }
 
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+       int ret;
+
+       ret = xt_check_proc_name(name, size);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(name, "power") ||
+           !strcmp(name, "subsystem") ||
+           !strcmp(name, "uevent"))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int idletimer_tg_create(struct idletimer_tg_info *info)
 {
        int ret;
@@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
                goto out;
        }
 
+       ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+       if (ret < 0)
+               goto out_free_timer;
+
        sysfs_attr_init(&info->timer->attr.attr);
        info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
        if (!info->timer->attr.attr.name) {
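
The idletimer hunk above validates the user-supplied label before it becomes a sysfs attribute name: besides the generic proc-name check, it must not collide with the "power", "subsystem" and "uevent" files the driver core already creates on the device. A sketch of the validator; the first check is a hypothetical stand-in for xt_check_proc_name():

    #include <string.h>
    #include <stdio.h>

    /* Hypothetical stand-in for xt_check_proc_name(): non-empty,
     * NUL-terminated within the buffer, no '/'. */
    static int check_proc_name(const char *name, unsigned int size)
    {
        size_t n = strnlen(name, size);

        if (n == 0 || n == size || strchr(name, '/'))
            return -1;
        return 0;
    }

    /* A user-supplied label becomes a sysfs file next to files the device
     * core owns, so names that already exist there must be rejected too. */
    static int check_sysfs_name(const char *name, unsigned int size)
    {
        if (check_proc_name(name, size) < 0)
            return -1;
        if (!strcmp(name, "power") ||
            !strcmp(name, "subsystem") ||
            !strcmp(name, "uevent"))
            return -1;                /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_sysfs_name("mytimer", 28));  /* 0 */
        printf("%d\n", check_sysfs_name("uevent", 28));   /* -1 */
        return 0;
    }
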
index 6bec37ab4472796ecd1f453966b27bb911bf8fa8..a4660c48ff0149ad1798cd646d4edb2ed5c7770f 100644 (file)
@@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                                         &info->labels.mask);
                if (err)
                        return err;
-       } else if (labels_nonzero(&info->labels.mask)) {
+       } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+                  labels_nonzero(&info->labels.mask)) {
                err = ovs_ct_set_labels(ct, key, &info->labels.value,
                                        &info->labels.mask);
                if (err)
index 382196e57a26c137f03fea19cb2ff5d8d69c728b..bc628acf4f4ffe7172e1be6591811e056ccb4f1b 100644 (file)
@@ -611,6 +611,7 @@ struct rxrpc_call {
                                                 * not hard-ACK'd packet follows this.
                                                 */
        rxrpc_seq_t             tx_top;         /* Highest Tx slot allocated. */
+       u16                     tx_backoff;     /* Delay to insert due to Tx failure */
 
        /* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
         * is fixed, we keep these numbers in terms of segments (ie. DATA
index 8e7434e92097e8f0a2676bcf87df090daf43ee2e..468efc3660c03805608d5e4f2f146f007e03f9b1 100644 (file)
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                else
                        ack_at = expiry;
 
+               ack_at += READ_ONCE(call->tx_backoff);
                ack_at += now;
                if (time_before(ack_at, call->ack_at)) {
                        WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
                container_of(work, struct rxrpc_call, processor);
        rxrpc_serial_t *send_ack;
        unsigned long now, next, t;
+       unsigned int iterations = 0;
 
        rxrpc_see_call(call);
 
@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
               call->debug_id, rxrpc_call_states[call->state], call->events);
 
 recheck_state:
+       /* Limit the number of times we do this before returning to the manager */
+       iterations++;
+       if (iterations > 5)
+               goto requeue;
+
        if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
                rxrpc_send_abort_packet(call);
                goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
        rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
 
        /* other events may have been raised since we started checking */
-       if (call->events && call->state < RXRPC_CALL_COMPLETE) {
-               __rxrpc_queue_call(call);
-               goto out;
-       }
+       if (call->events && call->state < RXRPC_CALL_COMPLETE)
+               goto requeue;
 
 out_put:
        rxrpc_put_call(call, rxrpc_call_put);
 out:
        _leave("");
+       return;
+
+requeue:
+       __rxrpc_queue_call(call);
+       goto out;
 }
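
The rxrpc_process_call() change above bounds the `goto recheck_state` loop: after five passes the work item requeues itself rather than starving other work on the same queue. The control shape, sketched with a hypothetical event source:

    #include <stdbool.h>
    #include <stdio.h>

    static int pending_events = 12;

    static bool handle_one_event(void)   /* true while events remain */
    {
        if (pending_events == 0)
            return false;
        pending_events--;
        return true;
    }

    static void requeue(void)
    {
        printf("requeued with %d events left\n", pending_events);
    }

    /* Process events in a loop, but give the queue back after a bounded
     * number of passes so one busy call can't monopolise the worker. */
    static void process_call(void)
    {
        unsigned int iterations = 0;

        for (;;) {
            if (++iterations > 5) {
                requeue();           /* run again later, from the top */
                return;
            }
            if (!handle_one_event())
                return;              /* drained */
        }
    }

    int main(void)
    {
        process_call();
        return 0;
    }
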
index 1894188888391fca2ef98a5324de7bc99c4b381f..736aa92811004cfe5d157abd4827710783f8d57c 100644 (file)
@@ -34,6 +34,21 @@ struct rxrpc_abort_buffer {
 
 static const char rxrpc_keepalive_string[] = "";
 
+/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+       if (ret < 0) {
+               u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+               if (tx_backoff < HZ)
+                       WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+       } else {
+               WRITE_ONCE(call->tx_backoff, 0);
+       }
+}
+
 /*
  * Arrange for a keepalive ping a certain time after we last transmitted.  This
  * lets the far side know we're still interested in this call and helps keep
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
                                      rxrpc_tx_point_call_ack);
+       rxrpc_tx_backoff(call, ret);
 
        if (call->state < RXRPC_CALL_COMPLETE) {
                if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                        rxrpc_propose_ACK(call, pkt->ack.reason,
                                          ntohs(pkt->ack.maxSkew),
                                          ntohl(pkt->ack.serial),
-                                         true, true,
+                                         false, true,
                                          rxrpc_propose_ack_retry_tx);
                } else {
                        spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
                                      rxrpc_tx_point_call_abort);
-
+       rxrpc_tx_backoff(call, ret);
 
        rxrpc_put_connection(conn);
        return ret;
@@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_nofrag);
+       rxrpc_tx_backoff(call, ret);
        if (ret == -EMSGSIZE)
                goto send_fragmentable;
 
@@ -445,9 +462,18 @@ done:
                        rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
                                                rxrpc_timer_set_for_normal);
                }
-       }
 
-       rxrpc_set_keepalive(call);
+               rxrpc_set_keepalive(call);
+       } else {
+               /* Cancel the call if the initial transmission fails,
+                * particularly if that's due to network routing issues that
+                * aren't going away anytime soon.  The layer above can arrange
+                * the retransmission.
+                */
+               if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+                       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+                                                 RX_USER_ABORT, ret);
+       }
 
        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_frag);
+       rxrpc_tx_backoff(call, ret);
 
        up_write(&conn->params.local->defrag_sem);
        goto done;
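
The rxrpc output changes above report every transmit result to rxrpc_tx_backoff(): each failure adds one jiffy of delay up to HZ, a success resets it, and the accumulated delay is folded into the ACK and resend deadlines (see the `ack_at += READ_ONCE(call->tx_backoff)` hunk earlier). The policy in plain C, with the kernel's READ_ONCE/WRITE_ONCE annotations dropped and an illustrative HZ:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000                 /* ticks per second; illustrative */

    struct call { uint16_t tx_backoff; /* extra delay in ticks */ };

    /* Linear increase on failure, capped at one second; full reset on the
     * first success -- the policy rxrpc_tx_backoff() implements. */
    static void tx_backoff(struct call *call, int ret)
    {
        if (ret < 0) {
            if (call->tx_backoff < HZ)
                call->tx_backoff++;
        } else {
            call->tx_backoff = 0;
        }
    }

    /* When arming a timer, the backoff is simply added to the deadline. */
    static uint64_t next_deadline(uint64_t now, uint64_t expiry,
                                  const struct call *call)
    {
        return now + expiry + call->tx_backoff;
    }

    int main(void)
    {
        struct call c = { 0 };

        tx_backoff(&c, -101);                    /* e.g. -ENETUNREACH */
        tx_backoff(&c, -101);
        printf("backoff=%u\n", (unsigned)c.tx_backoff);   /* 2 */
        printf("ack at %llu\n",
               (unsigned long long)next_deadline(10000, 50, &c));
        tx_backoff(&c, 0);                       /* success clears it */
        printf("backoff=%u\n", (unsigned)c.tx_backoff);   /* 0 */
        return 0;
    }
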
index 1dae5f2b358fcf3dac2bbc0ef80f53b0dba16c91..c8cf4d10c4355f934c02d407ec725670e36433b8 100644 (file)
@@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        if (is_redirect) {
                skb2->tc_redirected = 1;
                skb2->tc_from_ingress = skb2->tc_at_ingress;
-
+               if (skb2->tc_from_ingress)
+                       skb2->tstamp = 0;
                /* let's the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
index 9aada2d0ef06567a962f5bb2e929370278e5a2af..c6c327874abcc9974adf2675c8326a967c6df6f6 100644 (file)
@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                          struct netlink_ext_ack *extack)
 {
        const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
-       int option_len, key_depth, msk_depth = 0;
+       int err, option_len, key_depth, msk_depth = 0;
+
+       err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
+                                 TCA_FLOWER_KEY_ENC_OPTS_MAX,
+                                 enc_opts_policy, extack);
+       if (err)
+               return err;
 
        nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
 
        if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+               err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+                                         TCA_FLOWER_KEY_ENC_OPTS_MAX,
+                                         enc_opts_policy, extack);
+               if (err)
+                       return err;
+
                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
        }
index 57b3ad9394ad7a9f42c48d7645a61a6a03b2efe0..2c38e3d0792468162ee0dc4137f1400160ab9276 100644 (file)
@@ -648,15 +648,6 @@ deliver:
                         */
                        skb->dev = qdisc_dev(sch);
 
-#ifdef CONFIG_NET_CLS_ACT
-                       /*
-                        * If it's at ingress let's pretend the delay is
-                        * from the network (tstamp will be updated).
-                        */
-                       if (skb->tc_redirected && skb->tc_from_ingress)
-                               skb->tstamp = 0;
-#endif
-
                        if (q->slot.slot_next) {
                                q->slot.packets_left--;
                                q->slot.bytes_left -= qdisc_pkt_len(skb);
index 9cb854b05342e57a6743ee1fd7e91cab7c09bbd2..c37e1c2dec9d451f5bfc8ffd8a0f8b9d00358316 100644 (file)
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
-       sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
+       sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
 }
 
 /* Free the outqueue structure and any related pending chunks.
index d8831b988b1e7a3273c73ee2457615b26c24b529..ab4a3be1542a0a6fcfb35153da1a62b3f852c2b6 100644 (file)
@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
 {
        struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                gc_base)->acred;
-       bool ret;
-
-       get_rpccred(cred);
-       ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
-       put_rpccred(cred);
-
-       return ret;
+       return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
 }
 
 static const struct rpc_credops generic_credops = {
index 30f970cdc7f66375d45e5363dfa07cab233f4978..5d3f252659f191fe040452e6b55a862d850450e8 100644 (file)
@@ -1239,36 +1239,59 @@ gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
        return &gss_auth->rpc_auth;
 }
 
+static struct gss_cred *
+gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
+{
+       struct gss_cred *new;
+
+       /* Make a copy of the cred so that we can reference count it */
+       new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+       if (new) {
+               struct auth_cred acred = {
+                       .uid = gss_cred->gc_base.cr_uid,
+               };
+               struct gss_cl_ctx *ctx =
+                       rcu_dereference_protected(gss_cred->gc_ctx, 1);
+
+               rpcauth_init_cred(&new->gc_base, &acred,
+                               &gss_auth->rpc_auth,
+                               &gss_nullops);
+               new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
+               new->gc_service = gss_cred->gc_service;
+               new->gc_principal = gss_cred->gc_principal;
+               kref_get(&gss_auth->kref);
+               rcu_assign_pointer(new->gc_ctx, ctx);
+               gss_get_ctx(ctx);
+       }
+       return new;
+}
+
 /*
- * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
+ * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
  * to the server with the GSS control procedure field set to
  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
  * all RPCSEC_GSS state associated with that context.
  */
-static int
-gss_destroying_context(struct rpc_cred *cred)
+static void
+gss_send_destroy_context(struct rpc_cred *cred)
 {
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
        struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
+       struct gss_cred *new;
        struct rpc_task *task;
 
-       if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
-               return 0;
-
-       ctx->gc_proc = RPC_GSS_PROC_DESTROY;
-       cred->cr_ops = &gss_nullops;
-
-       /* Take a reference to ensure the cred will be destroyed either
-        * by the RPC call or by the put_rpccred() below */
-       get_rpccred(cred);
+       new = gss_dup_cred(gss_auth, gss_cred);
+       if (new) {
+               ctx->gc_proc = RPC_GSS_PROC_DESTROY;
 
-       task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
-       if (!IS_ERR(task))
-               rpc_put_task(task);
+               task = rpc_call_null(gss_auth->client, &new->gc_base,
+                               RPC_TASK_ASYNC|RPC_TASK_SOFT);
+               if (!IS_ERR(task))
+                       rpc_put_task(task);
 
-       put_rpccred(cred);
-       return 1;
+               put_rpccred(&new->gc_base);
+       }
 }
 
 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
@@ -1330,8 +1353,8 @@ static void
 gss_destroy_cred(struct rpc_cred *cred)
 {
 
-       if (gss_destroying_context(cred))
-               return;
+       if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
+               gss_send_destroy_context(cred);
        gss_destroy_nullcred(cred);
 }
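
The reshuffle above sends the RPC_GSS_PROC_DESTROY call on a duplicated credential rather than pinning the dying one, so destruction no longer recurses through the async call. A hedged sketch of the duplicate-for-async pattern, with made-up types:

#include <stdlib.h>

struct cred_sketch {
	int refcount;
	unsigned int uid;
};

/* Duplicate only the fields the NULL RPC needs; the copy carries its own
 * reference, which the async call drops when it completes, leaving the
 * original free to be torn down immediately. */
static struct cred_sketch *dup_cred_sketch(const struct cred_sketch *old)
{
	struct cred_sketch *new = calloc(1, sizeof(*new));

	if (new) {
		new->uid = old->uid;
		new->refcount = 1;    /* dropped when the async call completes */
	}
	return new;
}
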
 
index 2bbb8d38d2bf5f6eeb87a5771aeb92683d25543f..f302c6eb8779063a71b9a590325a96b8026ab3e6 100644 (file)
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
 static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
                size_t nbytes)
 {
-       static __be32 *p;
+       __be32 *p;
        int space_left;
        int frag1bytes, frag2bytes;
 
@@ -673,11 +673,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
                WARN_ON_ONCE(xdr->iov);
                return;
        }
-       if (fraglen) {
+       if (fraglen)
                xdr->end = head->iov_base + head->iov_len;
-               xdr->page_ptr--;
-       }
        /* (otherwise assume xdr->end is already set) */
+       xdr->page_ptr--;
        head->iov_len = len;
        buf->len = len;
        xdr->p = head->iov_base + head->iov_len;
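
The one-word fix in xdr_get_next_encode_buffer() removes a 'static' from a local scratch pointer. A small illustration of why that storage class was a bug (the helpers here are hypothetical):

/* A function-local 'static' is one shared object for every caller, so two
 * threads encoding at once clobber each other's scratch pointer; an
 * automatic variable gives each call its own slot. */
static int *broken_next_slot(int *buf, int i)
{
	static int *p;        /* shared across all callers: racy */

	p = &buf[i];
	return p;
}

static int *fixed_next_slot(int *buf, int i)
{
	int *p = &buf[i];     /* per-call storage: safe */

	return p;
}
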
index 201c3b5bc96be9fb412dbc60522b1513d2494a8f..836727e363c46290ab8ef55e9d7b630f1dfac293 100644 (file)
@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;
 
-               /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
-               if (msg_peer_stopping(hdr))
+               /* If peer is going down we want full re-establish cycle */
+               if (msg_peer_stopping(hdr)) {
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-               else if ((mtyp == RESET_MSG) || !link_is_up(l))
+                       break;
+               }
+               /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+               if (mtyp == RESET_MSG || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
 
                /* ACTIVATE_MSG takes up link if it was already locally reset */
-               if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+               if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
                        rc = TIPC_LINK_UP_EVT;
 
                l->peer_session = msg_session(hdr);
index a0149db00be752555ff4178f66540543a5835955..6c6439f69a725f4bd46210fb3bb2c3e80a49fab2 100755 (executable)
@@ -71,7 +71,7 @@ die() {
 
 # Try to figure out the source directory prefix so we can remove it from the
 # addr2line output.  HACK ALERT: This assumes that start_kernel() is in
-# kernel/init.c!  This only works for vmlinux.  Otherwise it falls back to
+# init/main.c!  This only works for vmlinux.  Otherwise it falls back to
 # printing the absolute path.
 find_dir_prefix() {
        local objfile=$1
index da66e7742282a65eb8457d3427b5bfab7f950415..0ef906499646b57293bb85dea54234c601bfcafa 100755 (executable)
@@ -102,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then
 fi
 
 MERGE_LIST=$*
-SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p"
+SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p"
+SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p"
 
 TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
 
@@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do
                echo "The merge file '$MERGE_FILE' does not exist.  Exit." >&2
                exit 1
        fi
-       CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
+       CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE)
 
        for CFG in $CFG_LIST ; do
                grep -q -w $CFG $TMP_FILE || continue
@@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
 
 
 # Check all specified config values took (might have missed-dependency issues)
-for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
+for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do
 
        REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
        ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG")
index 90c9a8ac7adb81d9dc10d0cb00eda86ee3a35a8f..f43a274f4f1d5b820c00826d939b20d325b2311f 100755 (executable)
@@ -81,11 +81,11 @@ else
        cp System.map "$tmpdir/boot/System.map-$version"
        cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
 fi
-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
+cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
 
-if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
        # Only some architectures with OF support have this target
-       if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then
+       if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
                $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
        fi
 fi
index 663a7f343b42c5417e4769a8da65754fd6eab859..edcad61fe3cdae66b8e8fe497f7f52329591ee0a 100755 (executable)
@@ -88,6 +88,7 @@ set_debarch() {
 version=$KERNELRELEASE
 if [ -n "$KDEB_PKGVERSION" ]; then
        packageversion=$KDEB_PKGVERSION
+       revision=${packageversion##*-}
 else
        revision=$(cat .version 2>/dev/null||echo 1)
        packageversion=$version-$revision
@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
 #!$(command -v $MAKE) -f
 
 build:
-       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
+       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+       KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
 
 binary-arch:
-       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
+       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+       KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
 
 clean:
        rm -rf debian/*tmp debian/files
index e05646dc24dcf633830ce6d8dd3da5f71a29731e..009147d4718eeead8117413a8bdf95108c84e4dd 100755 (executable)
@@ -12,6 +12,7 @@
 # how we were called determines which rpms we build and how we build them
 if [ "$1" = prebuilt ]; then
        S=DEL
+       MAKE="$MAKE -f $srctree/Makefile"
 else
        S=
 fi
@@ -78,19 +79,19 @@ $S  %prep
 $S     %setup -q
 $S
 $S     %build
-$S     make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
+$S     $MAKE %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
 $S
        %install
        mkdir -p %{buildroot}/boot
        %ifarch ia64
        mkdir -p %{buildroot}/boot/efi
-       cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
+       cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
        ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
        %else
-       cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
+       cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
        %endif
-$M     make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install
-       make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install
+$M     $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+       $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
        cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
        cp .config %{buildroot}/boot/config-$KERNELRELEASE
        bzip2 -9 --keep vmlinux
index 79f7dd57d571e749dc3e36ed95a473cd599e0056..71f39410691b6be14774102000507c834c8ad493 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
+               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
                        printf '%s' -dirty
                fi
 
index 839e190bbd7a0075ef28040291fca87f44d3170a..5056fb3b897d0094e182bba4fca08ae491dab7cc 100755 (executable)
@@ -168,7 +168,6 @@ class id_parser(object):
         self.curline = 0
         try:
             for line in fd:
-                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                 self.curline += 1
                 if self.curline > maxlines:
                     break
index 6dc0751445087727f9d1b09a8c1efdbbc9105625..d775e03fbbcc7d87b9724529a4499b9449a9aa46 100644 (file)
@@ -106,6 +106,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
        pks.pkey_algo = "rsa";
        pks.hash_algo = hash_algo_name[hdr->hash_algo];
+       pks.encoding = "pkcs1";
        pks.digest = (u8 *)data;
        pks.digest_size = datalen;
        pks.s = hdr->sig;
index 7ce683259357750cdfd25feb978ee1d6a984312b..a67459eb62d5c8d3066a72c354f13894f68be8ae 100644 (file)
@@ -5318,6 +5318,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
        addr_buf = address;
 
        while (walk_size < addrlen) {
+               if (walk_size + sizeof(sa_family_t) > addrlen)
+                       return -EINVAL;
+
                addr = addr_buf;
                switch (addr->sa_family) {
                case AF_UNSPEC:
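
The added check guards the sa_family read against a truncated address list. A compilable sketch of the same walk-and-bounds-check shape, using simplified userspace types rather than the LSM hook's arguments:

#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Before dereferencing addr->sa_family, make sure at least sizeof(sa_family_t)
 * bytes of the caller-supplied buffer remain; otherwise the read would run
 * past the end of the address list. */
static int walk_addrs_sketch(const char *addr_buf, size_t addrlen)
{
	size_t walk_size = 0;

	while (walk_size < addrlen) {
		const struct sockaddr *addr;

		if (walk_size + sizeof(sa_family_t) > addrlen)
			return -1;                     /* truncated entry */

		addr = (const struct sockaddr *)(addr_buf + walk_size);
		switch (addr->sa_family) {
		case AF_INET:
			walk_size += sizeof(struct sockaddr_in);
			break;
		case AF_INET6:
			walk_size += sizeof(struct sockaddr_in6);
			break;
		default:
			return -1;
		}
	}
	return 0;
}
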
index 2fe459df3c858dc038e2a5e5f851ee4a0f07fafa..b7efa2296969c617dc292759b61c3c6f1ca450a5 100644 (file)
@@ -245,9 +245,13 @@ int mls_context_to_sid(struct policydb *pol,
        char *rangep[2];
 
        if (!pol->mls_enabled) {
-               if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0')
-                       return 0;
-               return -EINVAL;
+               /*
+                * With no MLS, only return -EINVAL if there is a MLS field
+                * and it did not come from an xattr.
+                */
+               if (oldc && def_sid == SECSID_NULL)
+                       return -EINVAL;
+               return 0;
        }
 
        /*
index 97f49b751e6eb3583c948e7e35c0ddc8860cc818..568575b72f2f7269c727a202f6544b6f51a57d03 100644 (file)
@@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
-                   snd_hda_gen_add_micmute_led(codec,
-                                               update_tpacpi_micmute) > 0)
+                   !snd_hda_gen_add_micmute_led(codec,
+                                                update_tpacpi_micmute))
                        removefunc = false;
        }
 
index 83d76c345940557f25a6e2b08150bacbd3dc404f..00c92eb854ce7b65daedc7c3fa98f69f9b891d86 100644 (file)
@@ -1648,7 +1648,7 @@ static int had_create_jack(struct snd_intelhad *ctx,
  * PM callbacks
  */
 
-static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
+static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
 {
        struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
        int port;
@@ -1664,23 +1664,8 @@ static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
                }
        }
 
-       return 0;
-}
-
-static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
-{
-       struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
-       int err;
+       snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
 
-       err = hdmi_lpe_audio_runtime_suspend(dev);
-       if (!err)
-               snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
-       return err;
-}
-
-static int hdmi_lpe_audio_runtime_resume(struct device *dev)
-{
-       pm_runtime_mark_last_busy(dev);
        return 0;
 }
 
@@ -1688,8 +1673,10 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
 {
        struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
 
-       hdmi_lpe_audio_runtime_resume(dev);
+       pm_runtime_mark_last_busy(dev);
+
        snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
+
        return 0;
 }
 
@@ -1877,7 +1864,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
 
        dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
        for_each_port(card_ctx, port) {
@@ -1908,8 +1894,6 @@ static int hdmi_lpe_audio_remove(struct platform_device *pdev)
 
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
-       SET_RUNTIME_PM_OPS(hdmi_lpe_audio_runtime_suspend,
-                          hdmi_lpe_audio_runtime_resume, NULL)
 };
 
 static struct platform_driver hdmi_lpe_audio_driver = {
index 12835ea0e4173c281e5ddfe0883595492e985b09..378c051fa1776534b0e1fca7e56567f9f8a3d2d4 100644 (file)
 #define wmb()          asm volatile("dmb ishst" ::: "memory")
 #define rmb()          asm volatile("dmb ishld" ::: "memory")
 
-#define smp_store_release(p, v)                                        \
-do {                                                           \
-       union { typeof(*p) __val; char __c[1]; } __u =          \
-               { .__val = (__force typeof(*p)) (v) };          \
-                                                               \
-       switch (sizeof(*p)) {                                   \
-       case 1:                                                 \
-               asm volatile ("stlrb %w1, %0"                   \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u8 *)__u.__c)        \
-                               : "memory");                    \
-               break;                                          \
-       case 2:                                                 \
-               asm volatile ("stlrh %w1, %0"                   \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u16 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       case 4:                                                 \
-               asm volatile ("stlr %w1, %0"                    \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u32 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       case 8:                                                 \
-               asm volatile ("stlr %1, %0"                     \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u64 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       default:                                                \
-               /* Only to shut up gcc ... */                   \
-               mb();                                           \
-               break;                                          \
-       }                                                       \
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       union { typeof(*p) __val; char __c[1]; } __u =                  \
+               { .__val = (v) };                                       \
+                                                                       \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("stlrb %w1, %0"                           \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u8_alias_t *)__u.__c)        \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("stlrh %w1, %0"                           \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u16_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile ("stlr %w1, %0"                            \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u32_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("stlr %1, %0"                             \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u64_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       default:                                                        \
+               /* Only to shut up gcc ... */                           \
+               mb();                                                   \
+               break;                                                  \
+       }                                                               \
 } while (0)
 
-#define smp_load_acquire(p)                                    \
-({                                                             \
-       union { typeof(*p) __val; char __c[1]; } __u;           \
-                                                               \
-       switch (sizeof(*p)) {                                   \
-       case 1:                                                 \
-               asm volatile ("ldarb %w0, %1"                   \
-                       : "=r" (*(__u8 *)__u.__c)               \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 2:                                                 \
-               asm volatile ("ldarh %w0, %1"                   \
-                       : "=r" (*(__u16 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 4:                                                 \
-               asm volatile ("ldar %w0, %1"                    \
-                       : "=r" (*(__u32 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 8:                                                 \
-               asm volatile ("ldar %0, %1"                     \
-                       : "=r" (*(__u64 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       default:                                                \
-               /* Only to shut up gcc ... */                   \
-               mb();                                           \
-               break;                                          \
-       }                                                       \
-       __u.__val;                                              \
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       union { typeof(*p) __val; char __c[1]; } __u =                  \
+               { .__c = { 0 } };                                       \
+                                                                       \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("ldarb %w0, %1"                           \
+                       : "=r" (*(__u8_alias_t *)__u.__c)               \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("ldarh %w0, %1"                           \
+                       : "=r" (*(__u16_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile ("ldar %w0, %1"                            \
+                       : "=r" (*(__u32_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("ldar %0, %1"                             \
+                       : "=r" (*(__u64_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       default:                                                        \
+               /* Only to shut up gcc ... */                           \
+               mb();                                                   \
+               break;                                                  \
+       }                                                               \
+       __u.__val;                                                      \
 })
 
 #endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
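
For context, the intended use of these two primitives is the classic publish/consume pairing. A userspace analogue using C11 atomics (this is a sketch of the semantics, not the tools/ header itself):

#include <stdatomic.h>

static int payload;
static atomic_int ready;

/* Writer: publish the data, then set the flag with release semantics --
 * the C11 analogue of smp_store_release(). */
static void producer(void)
{
	payload = 42;
	atomic_store_explicit(&ready, 1, memory_order_release);
}

/* Reader: observe the flag with acquire semantics (smp_load_acquire());
 * once it is seen set, the earlier write to 'payload' is guaranteed
 * visible. */
static int consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;
	return payload;
}
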
index 236b9b97dfdb1d5d52d6cc9dcb9198cfd2ec1739..667c14e56031b5c3ac91c291aed3c876644d1247 100644 (file)
@@ -55,7 +55,6 @@ counted. The following modifiers exist:
  S - read sample value (PERF_SAMPLE_READ)
  D - pin the event to the PMU
  W - group is weak and will fallback to non-group if not schedulable,
-     only supported in 'perf stat' for now.
 
 The 'p' modifier can be used for specifying how precise the instruction
 address should be. The 'p' modifier can be specified multiple times:
index 3ccb4f0bf0883cd80a8b33f612c649e84aaec126..d95655489f7e17adcd16dcca6a772875f6d1c6b2 100644 (file)
@@ -387,7 +387,7 @@ SHELL = $(SHELL_PATH)
 
 linux_uapi_dir := $(srctree)/tools/include/uapi/linux
 asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
-arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/
+arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
 
 beauty_outdir := $(OUTPUT)trace/beauty/generated
 beauty_ioctl_outdir := $(beauty_outdir)/ioctl
index 10cf889c6d75d2db1d78014215e30948ae3b1cb4..488779bc4c8d2f6ed8dbcad69e1de5e477ede138 100644 (file)
@@ -391,7 +391,12 @@ try_again:
                                        ui__warning("%s\n", msg);
                                goto try_again;
                        }
-
+                       if ((errno == EINVAL || errno == EBADF) &&
+                           pos->leader != pos &&
+                           pos->weak_group) {
+                               pos = perf_evlist__reset_weak_group(evlist, pos);
+                               goto try_again;
+                       }
                        rc = -errno;
                        perf_evsel__open_strerror(pos, &opts->target,
                                                  errno, msg, sizeof(msg));
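
This gives 'perf record' the same weak-group fallback that 'perf stat' already had: on EINVAL/EBADF, break up the group and retry each event on its own. A rough sketch of the reset step, with deliberately simplified types:

#include <stddef.h>

struct evsel_sketch {
	struct evsel_sketch *leader;
	int nr_members;
};

/* Detach every member of the failed group -- each event becomes its own
 * leader with no members -- so the subsequent retry opens them separately. */
static void reset_weak_group_sketch(struct evsel_sketch *evsels, size_t n,
				    struct evsel_sketch *failed)
{
	struct evsel_sketch *leader = failed->leader;
	size_t i;

	for (i = 0; i < n; i++) {
		if (evsels[i].leader == leader) {
			evsels[i].leader = &evsels[i];
			evsels[i].nr_members = 0;
		}
	}
}
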
index d1028d7755bbcb946a517c333a3757d91e75c8ed..a635abfa77b6a9e38fa7aa994ea4cfd31608f16a 100644 (file)
@@ -383,32 +383,6 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
        return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
 }
 
-static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
-{
-       struct perf_evsel *c2, *leader;
-       bool is_open = true;
-
-       leader = evsel->leader;
-       pr_debug("Weak group for %s/%d failed\n",
-                       leader->name, leader->nr_members);
-
-       /*
-        * for_each_group_member doesn't work here because it doesn't
-        * include the first entry.
-        */
-       evlist__for_each_entry(evsel_list, c2) {
-               if (c2 == evsel)
-                       is_open = false;
-               if (c2->leader == leader) {
-                       if (is_open)
-                               perf_evsel__close(c2);
-                       c2->leader = c2;
-                       c2->nr_members = 0;
-               }
-       }
-       return leader;
-}
-
 static bool is_target_alive(struct target *_target,
                            struct thread_map *threads)
 {
@@ -477,7 +451,7 @@ try_again:
                        if ((errno == EINVAL || errno == EBADF) &&
                            counter->leader != counter &&
                            counter->weak_group) {
-                               counter = perf_evsel__reset_weak_group(counter);
+                               counter = perf_evlist__reset_weak_group(evsel_list, counter);
                                goto try_again;
                        }
 
index b2838de13de02e29f6ad766eb8de11ec27bfa31b..aa0c73e5792404355c5e8c2de048ff8e5da18e39 100644 (file)
@@ -1429,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
                }
        }
 
+       if (opts->branch_stack && callchain_param.enabled)
+               symbol_conf.show_branchflag_count = true;
+
        sort__mode = SORT_MODE__TOP;
        /* display thread wants entries to be collapsed in a different tree */
        perf_hpp_list.need_collapse = 1;
index dc8a6c4986ce2066b0e76cd58e0edb2f7f09852a..835619476370cc0ae1d43bae705597f819a4c732 100644 (file)
@@ -108,6 +108,7 @@ struct trace {
        } stats;
        unsigned int            max_stack;
        unsigned int            min_stack;
+       bool                    raw_augmented_syscalls;
        bool                    not_ev_qualifier;
        bool                    live;
        bool                    full_time;
@@ -1724,13 +1725,28 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
        return printed;
 }
 
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
 {
        void *augmented_args = NULL;
+       /*
+        * For now, with BPF raw augmentation, we hook into raw_syscalls:sys_enter
+        * and there we get all 6 syscall args plus the tracepoint common
+        * fields (sizeof(long)) and the syscall_nr (another long). So we check
+        * whether that is the case and, if so, look not after sc->args_size
+        * but after the full raw_syscalls:sys_enter payload, whose size is
+        * fixed.
+        *
+        * We'll revisit this later, passing sc->args_size to the BPF augmenter
+        * (now tools/perf/examples/bpf/augmented_raw_syscalls.c) so that it
+        * copies only what we need for each syscall, like what happens when we
+        * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
+        * to just what each syscall needs.
+        */
+       int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
 
-       *augmented_args_size = sample->raw_size - sc->args_size;
+       *augmented_args_size = sample->raw_size - args_size;
        if (*augmented_args_size > 0)
-               augmented_args = sample->raw_data + sc->args_size;
+               augmented_args = sample->raw_data + args_size;
 
        return augmented_args;
 }
@@ -1780,7 +1796,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
         * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
         */
        if (evsel != trace->syscalls.events.sys_enter)
-               augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+               augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
        ttrace->entry_time = sample->time;
        msg = ttrace->entry_str;
        printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1833,7 +1849,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
                goto out_put;
 
        args = perf_evsel__sc_tp_ptr(evsel, args, sample);
-       augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+       augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
        syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
        fprintf(trace->output, "%s", msg);
        err = 0;
@@ -3501,7 +3517,15 @@ int cmd_trace(int argc, const char **argv)
                evsel->handler = trace__sys_enter;
 
                evlist__for_each_entry(trace.evlist, evsel) {
+                       bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
+                       if (raw_syscalls_sys_exit) {
+                               trace.raw_augmented_syscalls = true;
+                               goto init_augmented_syscall_tp;
+                       }
+
                        if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
                                perf_evsel__init_augmented_syscall_tp(evsel);
                                perf_evsel__init_augmented_syscall_tp_ret(evsel);
                                evsel->handler = trace__sys_exit;
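
A quick way to see where the 8 * sizeof(long) used by the raw-augmented path comes from, assuming an LP64 target (the struct mirrors the payload layout described in the comment above):

#include <assert.h>

struct sys_enter_payload {
	unsigned long long common_tp_fields;  /* tracepoint common fields */
	long               syscall_nr;
	unsigned long      args[6];
};

int main(void)
{
	/* On LP64 targets the fixed payload is eight longs; this is the
	 * 8 * sizeof(long) that syscall__augmented_args() subtracts. */
	assert(sizeof(struct sys_enter_payload) == 8 * sizeof(long));
	return 0;
}
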
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644 (file)
index 0000000..90a1933
--- /dev/null
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscalls:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace'; in upcoming csets we'll hook these up to the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace-like way.
+ */
+
+#include <stdio.h>
+#include <linux/socket.h>
+
+/* bpf-output associated map */
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       unsigned long      args[6];
+};
+
+struct syscall_exit_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       long               ret;
+};
+
+struct augmented_filename {
+       unsigned int    size;
+       int             reserved;
+       char            value[256];
+};
+
+#define SYS_OPEN 2
+#define SYS_OPENAT 257
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+       struct {
+               struct syscall_enter_args args;
+               struct augmented_filename filename;
+       } augmented_args;
+       unsigned int len = sizeof(augmented_args);
+       const void *filename_arg = NULL;
+
+       probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+       /*
+        * Yonghong and Edward Cree sayz:
+        *
+        * https://www.spinics.net/lists/netdev/msg531645.html
+        *
+        * >>   R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
+        * >> 10: (bf) r1 = r6
+        * >> 11: (07) r1 += 16
+        * >> 12: (05) goto pc+2
+        * >> 15: (79) r3 = *(u64 *)(r1 +0)
+        * >> dereference of modified ctx ptr R1 off=16 disallowed
+        * > Aha, we at least got a different error message this time.
+        * > And indeed llvm has done that optimisation, rather than the more obvious
+        * > 11: r3 = *(u64 *)(r1 +16)
+        * > because it wants to have lots of reads share a single insn.  You may be able
+        * > to defeat that optimisation by adding compiler barriers, idk.  Maybe someone
+        * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
+        * > when it's generating for bpf backend and not do that).  -O0?  ¯\_(ツ)_/¯
+        *
+        * The optimization mostly looks like the below:
+        *
+        *      br1:
+        *      ...
+        *      r1 += 16
+        *      goto merge
+        *      br2:
+        *      ...
+        *      r1 += 20
+        *      goto merge
+        *      merge:
+        *      *(u64 *)(r1 + 0)
+        *
+        * The compiler tries to merge common loads. There is no easy way to
+        * stop this compiler optimization without turning off a lot of other
+        * optimizations. The easiest way is to add barriers:
+        *
+        *       __asm__ __volatile__("": : :"memory")
+        *
+        * after the ctx memory access, to prevent their downstream merging.
+        */
+       switch (augmented_args.args.syscall_nr) {
+       case SYS_OPEN:   filename_arg = (const void *)args->args[0];
+                       __asm__ __volatile__("": : :"memory");
+                        break;
+       case SYS_OPENAT: filename_arg = (const void *)args->args[1];
+                        break;
+       }
+
+       if (filename_arg != NULL) {
+               augmented_args.filename.reserved = 0;
+               augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
+                                                             sizeof(augmented_args.filename.value),
+                                                             filename_arg);
+               if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
+                       len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
+                       len &= sizeof(augmented_args.filename.value) - 1;
+               }
+       } else {
+               len = sizeof(augmented_args.args);
+       }
+
+       perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+       return 0;
+}
+
+SEC("raw_syscalls:sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+       return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+}
+
+license(GPL);
index ac1bcdc17dae7554f51a780b843605c441c6abbf..f7eb63cbbc655bdcebbd710ed38ce39a907db4bc 100644 (file)
@@ -125,7 +125,7 @@ perf_get_timestamp(void)
 }
 
 static int
-debug_cache_init(void)
+create_jit_cache_dir(void)
 {
        char str[32];
        char *base, *p;
@@ -144,8 +144,13 @@ debug_cache_init(void)
 
        strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
-
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
+                       " is too long, please check the cwd, JITDUMPDIR, and"
+                       " HOME variables", base);
+               return -1;
+       }
        ret = mkdir(jit_path, 0755);
        if (ret == -1) {
                if (errno != EEXIST) {
@@ -154,20 +159,32 @@ debug_cache_init(void)
                }
        }
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because"
+                       " %s/.debug/jit is too long, please check the cwd,"
+                       " JITDUMPDIR, and HOME variables", base);
+               return -1;
+       }
        ret = mkdir(jit_path, 0755);
        if (ret == -1) {
                if (errno != EEXIST) {
-                       warn("cannot create jit cache dir %s", jit_path);
+                       warn("jvmti: cannot create jit cache dir %s", jit_path);
                        return -1;
                }
        }
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
-
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because"
+                       " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
+                       " the cwd, JITDUMPDIR, and HOME variables",
+                       base, str);
+               return -1;
+       }
        p = mkdtemp(jit_path);
        if (p != jit_path) {
-               warn("cannot create jit cache dir %s", jit_path);
+               warn("jvmti: cannot create jit cache dir %s", jit_path);
                return -1;
        }
 
@@ -228,7 +245,7 @@ void *jvmti_open(void)
 {
        char dump_path[PATH_MAX];
        struct jitheader header;
-       int fd;
+       int fd, ret;
        FILE *fp;
 
        init_arch_timestamp();
@@ -245,12 +262,22 @@ void *jvmti_open(void)
 
        memset(&header, 0, sizeof(header));
 
-       debug_cache_init();
+       /*
+        * jitdump file dir
+        */
+       if (create_jit_cache_dir() < 0)
+               return NULL;
 
        /*
         * jitdump file name
         */
-       scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jitdump file full path because"
+                       " %s/jit-%i.dump is too long, please check the cwd,"
+                       " JITDUMPDIR, and HOME variables", jit_path, getpid());
+               return NULL;
+       }
 
        fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
        if (fd == -1)
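
All of the snprintf() call sites above now share the same truncation check. A minimal standalone example of the pattern (the PATH_MAX stand-in and helper name are illustrative):

#include <stdio.h>

#define PATH_MAX_SKETCH 4096

/* snprintf() returns the length the untruncated string would have had, so a
 * return value >= the buffer size signals silent truncation -- exactly the
 * condition this patch starts rejecting after every path construction. */
static int build_path_checked(char out[PATH_MAX_SKETCH], const char *base)
{
	int ret = snprintf(out, PATH_MAX_SKETCH, "%s/.debug/jit", base);

	if (ret < 0 || ret >= PATH_MAX_SKETCH)
		return -1;            /* error or would-be truncation */
	return 0;
}
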
index 24cb0bd56afa56bb4f0c4d48cefa4f7446681c66..f278ce5ebab76640de4eb61ee8661040d0ac092a 100755 (executable)
@@ -119,6 +119,14 @@ def dsoname(name):
                return "[kernel]"
        return name
 
+def findnth(s, sub, n, offs=0):
+       pos = s.find(sub)
+       if pos < 0:
+               return pos
+       if n <= 1:
+               return offs + pos
+       return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
+
 # Percent to one decimal place
 
 def PercentToOneDP(n, d):
@@ -1464,6 +1472,317 @@ class BranchWindow(QMdiSubWindow):
                else:
                        self.find_bar.NotFound()
 
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDialogDataItem():
+
+       def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+               self.glb = glb
+               self.label = label
+               self.placeholder_text = placeholder_text
+               self.table_name = table_name
+               self.match_column = match_column
+               self.column_name1 = column_name1
+               self.column_name2 = column_name2
+               self.parent = parent
+
+               self.value = ""
+
+               self.widget = QLineEdit()
+               self.widget.editingFinished.connect(self.Validate)
+               self.widget.textChanged.connect(self.Invalidate)
+               self.red = False
+               self.error = ""
+               self.validated = True
+
+               self.last_id = 0
+               self.first_time = 0
+               self.last_time = 2 ** 64
+               if self.table_name == "<timeranges>":
+                       query = QSqlQuery(self.glb.db)
+                       QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+                       if query.next():
+                               self.last_id = int(query.value(0))
+                               self.last_time = int(query.value(1))
+                       QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+                       if query.next():
+                               self.first_time = int(query.value(0))
+                       if placeholder_text:
+                               placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+               if placeholder_text:
+                       self.widget.setPlaceholderText(placeholder_text)
+
+       def ValueToIds(self, value):
+               ids = []
+               query = QSqlQuery(self.glb.db)
+               stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+               ret = query.exec_(stmt)
+               if ret:
+                       while query.next():
+                               ids.append(str(query.value(0)))
+               return ids
+
+       def IdBetween(self, query, lower_id, higher_id, order):
+               QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+               if query.next():
+                       return True, int(query.value(0))
+               else:
+                       return False, 0
+
+       def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+               query = QSqlQuery(self.glb.db)
+               while True:
+                       next_id = int((lower_id + higher_id) / 2)
+                       QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+                       if not query.next():
+                               ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+                               if not ok:
+                                       ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+                                       if not ok:
+                                               return str(higher_id)
+                               next_id = dbid
+                               QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+                       next_time = int(query.value(0))
+                       if get_floor:
+                               if target_time > next_time:
+                                       lower_id = next_id
+                               else:
+                                       higher_id = next_id
+                               if higher_id <= lower_id + 1:
+                                       return str(higher_id)
+                       else:
+                               if target_time >= next_time:
+                                       lower_id = next_id
+                               else:
+                                       higher_id = next_id
+                               if higher_id <= lower_id + 1:
+                                       return str(lower_id)
+
+       def ConvertRelativeTime(self, val):
+               print "val ", val
+               mult = 1
+               suffix = val[-2:]
+               if suffix == "ms":
+                       mult = 1000000
+               elif suffix == "us":
+                       mult = 1000
+               elif suffix == "ns":
+                       mult = 1
+               else:
+                       return val
+               val = val[:-2].strip()
+               if not self.IsNumber(val):
+                       return val
+               val = int(val) * mult
+               if val >= 0:
+                       val += self.first_time
+               else:
+                       val += self.last_time
+               return str(val)
+
+       def ConvertTimeRange(self, vrange):
+               print "vrange ", vrange
+               if vrange[0] == "":
+                       vrange[0] = str(self.first_time)
+               if vrange[1] == "":
+                       vrange[1] = str(self.last_time)
+               vrange[0] = self.ConvertRelativeTime(vrange[0])
+               vrange[1] = self.ConvertRelativeTime(vrange[1])
+               print "vrange2 ", vrange
+               if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+                       return False
+               print "ok1"
+               beg_range = max(int(vrange[0]), self.first_time)
+               end_range = min(int(vrange[1]), self.last_time)
+               if beg_range > self.last_time or end_range < self.first_time:
+                       return False
+               print "ok2"
+               vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+               vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+               print "vrange3 ", vrange
+               return True
+
+       def AddTimeRange(self, value, ranges):
+               print "value ", value
+               n = value.count("-")
+               if n == 1:
+                       pass
+               elif n == 2:
+                       if value.split("-")[1].strip() == "":
+                               n = 1
+               elif n == 3:
+                       n = 2
+               else:
+                       return False
+               pos = findnth(value, "-", n)
+               vrange = [value[:pos].strip() ,value[pos+1:].strip()]
+               if self.ConvertTimeRange(vrange):
+                       ranges.append(vrange)
+                       return True
+               return False
+
+       def InvalidValue(self, value):
+               self.value = ""
+               palette = QPalette()
+               palette.setColor(QPalette.Text,Qt.red)
+               self.widget.setPalette(palette)
+               self.red = True
+               self.error = self.label + " invalid value '" + value + "'"
+               self.parent.ShowMessage(self.error)
+
+       def IsNumber(self, value):
+               try:
+                       x = int(value)
+               except:
+                       x = 0
+               return str(x) == value
+
+       def Invalidate(self):
+               self.validated = False
+
+       def Validate(self):
+               input_string = self.widget.text()
+               self.validated = True
+               if self.red:
+                       palette = QPalette()
+                       self.widget.setPalette(palette)
+                       self.red = False
+               if not len(input_string.strip()):
+                       self.error = ""
+                       self.value = ""
+                       return
+               if self.table_name == "<timeranges>":
+                       ranges = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               if not self.AddTimeRange(value, ranges):
+                                       return self.InvalidValue(value)
+                       ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+                       self.value = " OR ".join(ranges)
+               elif self.table_name == "<ranges>":
+                       singles = []
+                       ranges = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               if "-" in value:
+                                       vrange = value.split("-")
+                                       if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+                                               return self.InvalidValue(value)
+                                       ranges.append(vrange)
+                               else:
+                                       if not self.IsNumber(value):
+                                               return self.InvalidValue(value)
+                                       singles.append(value)
+                       ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+                       if len(singles):
+                               ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
+                       self.value = " OR ".join(ranges)
+               elif self.table_name:
+                       all_ids = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               ids = self.ValueToIds(value)
+                               if len(ids):
+                                       all_ids.extend(ids)
+                               else:
+                                       return self.InvalidValue(value)
+                       self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+                       if self.column_name2:
+                               self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+               else:
+                       self.value = input_string.strip()
+               self.error = ""
+               self.parent.ClearMessage()
+
+       def IsValid(self):
+               if not self.validated:
+                       self.Validate()
+               if len(self.error):
+                       self.parent.ShowMessage(self.error)
+                       return False
+               return True
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(QDialog):
+
+       def __init__(self, glb, parent=None):
+               super(SelectedBranchDialog, self).__init__(parent)
+
+               self.glb = glb
+
+               self.name = ""
+               self.where_clause = ""
+
+               self.setWindowTitle("Selected Branches")
+               self.setMinimumWidth(600)
+
+               items = (
+                       ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
+                       ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
+                       ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
+                       ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
+                       ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
+                       ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
+                       ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
+                       ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
+                       ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
+                       )
+               self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+
+               self.grid = QGridLayout()
+
+               for row in xrange(len(self.data_items)):
+                       self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+                       self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+               self.status = QLabel()
+
+               self.ok_button = QPushButton("Ok", self)
+               self.ok_button.setDefault(True)
+               self.ok_button.released.connect(self.Ok)
+               self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.cancel_button = QPushButton("Cancel", self)
+               self.cancel_button.released.connect(self.reject)
+               self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.hbox = QHBoxLayout()
+               #self.hbox.addStretch()
+               self.hbox.addWidget(self.status)
+               self.hbox.addWidget(self.ok_button)
+               self.hbox.addWidget(self.cancel_button)
+
+               self.vbox = QVBoxLayout()
+               self.vbox.addLayout(self.grid)
+               self.vbox.addLayout(self.hbox)
+
+               self.setLayout(self.vbox)
+
+       def Ok(self):
+               self.name = self.data_items[0].value
+               if not self.name:
+                       self.ShowMessage("Report name is required")
+                       return
+               for d in self.data_items:
+                       if not d.IsValid():
+                               return
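+               # AND together all non-empty criteria, skipping the report name (data_items[0])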
+               for d in self.data_items[1:]:
+                       if len(d.value):
+                               if len(self.where_clause):
+                                       self.where_clause += " AND "
+                               self.where_clause += d.value
+               if len(self.where_clause):
+                       self.where_clause = " AND ( " + self.where_clause + " ) "
+               else:
+                       self.ShowMessage("No selection")
+                       return
+               self.accept()
+
+       def ShowMessage(self, msg):
+               self.status.setText("<font color=#FF0000>" + msg + "</font>")
+
+       def ClearMessage(self):
+               self.status.setText("")
+
 # Event list
 
 def GetEventList(db):
@@ -1656,7 +1975,7 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
        def FindDone(self, row):
                self.find_bar.Idle()
                if row >= 0:
-                       self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
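+                       # Map the source-model index through the sort/filter proxy before selecting it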
+                       self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
                else:
                        self.find_bar.NotFound()
 
@@ -1765,6 +2084,149 @@ class WindowMenu():
        def setActiveSubWindow(self, nr):
                self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
 
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+    text-indent: 40px;
+}
+p.c2 {
+    text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#allbranches>1.2 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c1><a href=#tables>2. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+                                         Call Graph: pt_example
+Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+v- ls
+    v- 2638:2638
+        v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+          |- unknown               unknown       1        13198     0.1              1              0.0
+          >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+          >- _dl_init_internal     ld-2.19.so    1       448152     4.4          11094              5.3
+          v- __libc_start_main@plt ls            1      8211741    81.5         180397             85.4
+             >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+             >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+             >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+             |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+             v- main               ls            1      8182043    99.6         180254             99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
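+For example, the pattern *main* matches any function name containing "main".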
+<h2 id=allbranches>1.2 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting the environment variable PERF_BUILDID_DIR.
+One exception is kcore, for which the DSO long name is used (see dsos_view on the Tables menu);
+alternatively, set the environment variable PERF_KCORE to the kcore file name.</li>
+</ol>
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present.  To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.3 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report, but with the data reduced
+by various selection criteria. A dialog box displays the available criteria, which are AND'ed together.
+<h3>1.3.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of the trace.  Examples:
+<pre>
+       81073085947329-81073085958238   From 81073085947329 to 81073085958238
+       100us-200us             From 100us to 200us
+       10ms-                   From 10ms to the end
+       -100ns                  The first 100ns
+       -10ms-                  The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h1 id=tables>2. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+       def __init__(self, glb, parent=None):
+               super(HelpWindow, self).__init__(parent)
+
+               self.text = QTextBrowser()
+               self.text.setHtml(glb_help_text)
+               self.text.setReadOnly(True)
+               self.text.setOpenExternalLinks(True)
+
+               self.setWidget(self.text)
+
+               AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+       def __init__(self, parent=None):
+               super(HelpOnlyWindow, self).__init__(parent)
+
+               self.setMinimumSize(200, 100)
+               self.resize(800, 600)
+               self.setWindowTitle("Exported SQL Viewer Help")
+               self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+               self.text = QTextBrowser()
+               self.text.setHtml(glb_help_text)
+               self.text.setReadOnly(True)
+               self.text.setOpenExternalLinks(True)
+
+               self.setCentralWidget(self.text)
+
 # Font resize
 
 def ResizeFont(widget, diff):
@@ -1851,6 +2313,9 @@ class MainWindow(QMainWindow):
 
                self.window_menu = WindowMenu(self.mdi_area, menu)
 
+               help_menu = menu.addMenu("&Help")
+               help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+
        def Find(self):
                win = self.mdi_area.activeSubWindow()
                if win:
@@ -1888,6 +2353,8 @@ class MainWindow(QMainWindow):
                        if event == "branches":
                                label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
                                reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
+                               label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+                               reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
 
        def TableMenu(self, tables, menu):
                table_menu = menu.addMenu("&Tables")
@@ -1900,9 +2367,18 @@ class MainWindow(QMainWindow):
        def NewBranchView(self, event_id):
                BranchWindow(self.glb, event_id, "", "", self)
 
+       def NewSelectedBranchView(self, event_id):
+               dialog = SelectedBranchDialog(self.glb, self)
+               ret = dialog.exec_()
+               if ret:
+                       BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+
        def NewTableView(self, table_name):
                TableWindow(self.glb, table_name, self)
 
+       def Help(self):
+               HelpWindow(self.glb, self)
+
 # XED Disassembler
 
 class xed_state_t(Structure):
@@ -1929,7 +2405,12 @@ class XEDInstruction():
 class LibXED():
 
        def __init__(self):
-               self.libxed = CDLL("libxed.so")
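+               # Fall back to the install location used by the XED setup instructions (--prefix=/usr/local)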
+               try:
+                       self.libxed = CDLL("libxed.so")
+               except OSError:
+                       self.libxed = None
+               if not self.libxed:
+                       self.libxed = CDLL("/usr/local/lib/libxed.so")
 
                self.xed_tables_init = self.libxed.xed_tables_init
                self.xed_tables_init.restype = None
@@ -2097,10 +2578,16 @@ class DBRef():
 
 def Main():
        if (len(sys.argv) < 2):
-               print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>"
+               print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
                raise Exception("Too few arguments")
 
        dbname = sys.argv[1]
+       if dbname == "--help-only":
+               app = QApplication(sys.argv)
+               mainwindow = HelpOnlyWindow()
+               mainwindow.show()
+               err = app.exec_()
+               sys.exit(err)
 
        is_sqlite3 = False
        try:
index 8a33ca4f9e1f7feed87159d755ba3b4797a987b6..f0729c454f160bed941b16133b9ac437c973404d 100644 (file)
@@ -37,4 +37,3 @@ sample_freq=0
 sample_period=0
 freq=0
 write_backward=0
-sample_id_all=0
index e88e6f9b1463f0674a0eaf12423b35978e58d8da..668d2a9ef0f4b698231c7ad0388210175f0f8dab 100644 (file)
@@ -1810,3 +1810,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist)
                leader->forced_leader = true;
        }
 }
+
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
+                                                struct perf_evsel *evsel)
+{
+       struct perf_evsel *c2, *leader;
+       bool is_open = true;
+
+       leader = evsel->leader;
+       pr_debug("Weak group for %s/%d failed\n",
+                       leader->name, leader->nr_members);
+
+       /*
+        * for_each_group_member doesn't work here because it doesn't
+        * include the first entry.
+        */
+       evlist__for_each_entry(evsel_list, c2) {
+               if (c2 == evsel)
+                       is_open = false;
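+               /* evsel itself failed to open, so it and everything after it is not open */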
+               if (c2->leader == leader) {
+                       if (is_open)
+                               perf_evsel__close(c2);
+                       c2->leader = c2;
+                       c2->nr_members = 0;
+               }
+       }
+       return leader;
+}
index dc66436add98a3c795efa3ddf0889f09f1d7abe3..9919eed6d15bc1994844187d9b55e69b746312cc 100644 (file)
@@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
 
 void perf_evlist__force_leader(struct perf_evlist *evlist);
 
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
+                                                struct perf_evsel *evsel);
+
 #endif /* __PERF_EVLIST_H */
index 6d187059a37360ae669220c913c535b1278d6514..d37bb1566cd9da7eb0875f83dcfa46edce36a552 100644 (file)
@@ -956,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                attr->sample_freq    = 0;
                attr->sample_period  = 0;
                attr->write_backward = 0;
-               attr->sample_id_all  = 0;
        }
 
        if (opts->no_samples)
index 58f6a9ceb5909c1007c4e7a95aa92230da6fff41..4503f3ca45ab48d7305260cf36e51800eecaf089 100644 (file)
@@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
                decoder->have_calc_cyc_to_tsc = false;
                intel_pt_calc_cyc_to_tsc(decoder, true);
        }
+
+       intel_pt_log_to("Setting timestamp", decoder->timestamp);
 }
 
 static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
@@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
                decoder->timestamp = timestamp;
 
        decoder->timestamp_insn_cnt = 0;
+
+       intel_pt_log_to("Setting timestamp", decoder->timestamp);
 }
 
 /* Walk PSB+ packets when already in sync. */
index e02bc7b166a0e48f1cf537606a71ab43875c6de8..5e64da270f97684b7f42c56079a04a2653a1b890 100644 (file)
@@ -31,6 +31,11 @@ static FILE *f;
 static char log_name[MAX_LOG_NAME];
 bool intel_pt_enable_logging;
 
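+/* Return the log file handle; void * keeps FILE out of the header */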
+void *intel_pt_log_fp(void)
+{
+       return f;
+}
+
 void intel_pt_log_enable(void)
 {
        intel_pt_enable_logging = true;
index 45b64f93f358898c6fb8d5caca92b37cd70f2e2e..cc084937f701acfa16bc3eb3623aab7e6ed429d2 100644 (file)
@@ -22,6 +22,7 @@
 
 struct intel_pt_pkt;
 
+void *intel_pt_log_fp(void);
 void intel_pt_log_enable(void);
 void intel_pt_log_disable(void);
 void intel_pt_log_set_name(const char *name);
index 86cc9a64e982773408e2d51cc25a51a82c396ff5..149ff361ca789e460cd896beaa439ff7b225c2a2 100644 (file)
@@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
        intel_pt_dump(pt, buf, len);
 }
 
+static void intel_pt_log_event(union perf_event *event)
+{
+       FILE *f = intel_pt_log_fp();
+
+       if (!intel_pt_enable_logging || !f)
+               return;
+
+       perf_event__fprintf(event, f);
+}
+
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
 {
@@ -2010,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session,
                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
                err = intel_pt_context_switch(pt, event, sample);
 
-       intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
-                    perf_event__name(event->header.type), event->header.type,
-                    sample->cpu, sample->time, timestamp);
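+       /* Log the event itself via perf_event__fprintf() rather than just its name */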
+       intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+                    event->header.type, sample->cpu, sample->time, timestamp);
+       intel_pt_log_event(event);
 
        return err;
 }
index 7799788f662fdc05765915b383d13085f2a932ac..7e49baad304d78815966a6ac918f472fd817620b 100644 (file)
@@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
 
                if (!is_arm_pmu_core(name)) {
                        pname = pe->pmu ? pe->pmu : "cpu";
-                       if (strncmp(pname, name, strlen(pname)))
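+                       /* Require an exact PMU name match rather than a prefix match */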
+                       if (strcmp(pname, name))
                                continue;
                }
 
index 9527d47a1070ecbabc716273d6e46f34e18c1ca1..01ec04bf91b592e470c26512e744d5e59211a3d4 100644 (file)
@@ -140,8 +140,8 @@ static u32 handle[] = {
        [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
 };
 
-static unsigned long dimm_fail_cmd_flags[NUM_DCR];
-static int dimm_fail_cmd_code[NUM_DCR];
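+/* These per-DIMM arrays are indexed by position in handle[], so size them to match */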
+static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
+static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 
 static const struct nd_intel_smart smart_def = {
        .flags = ND_INTEL_SMART_HEALTH_VALID
@@ -205,7 +205,7 @@ struct nfit_test {
                unsigned long deadline;
                spinlock_t lock;
        } ars_state;
-       struct device *dimm_dev[NUM_DCR];
+       struct device *dimm_dev[ARRAY_SIZE(handle)];
        struct nd_intel_smart *smart;
        struct nd_intel_smart_threshold *smart_threshold;
        struct badrange badrange;
@@ -2680,7 +2680,7 @@ static int nfit_test_probe(struct platform_device *pdev)
                u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
                int i;
 
-               for (i = 0; i < NUM_DCR; i++)
+               for (i = 0; i < ARRAY_SIZE(handle); i++)
                        if (nfit_handle == handle[i])
                                dev_set_drvdata(nfit_test->dimm_dev[i],
                                                nfit_mem);
index 1b0e9e9a2ddce5a3377ded7dad656202586339dc..f2fa101c5a6ac149bd93c4f8140aed76798b8e69 100644 (file)
@@ -47,8 +47,9 @@ static int ok(void)
        return 0;
 }
 
-#define REG_POISON     0x5a5aUL
-#define POISONED_REG(n)        ((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
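+/* Build the 64-bit poison pattern with explicit casts rather than a UL-suffixed constant */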
+#define REG_POISON     0x5a5a
+#define POISONED_REG(n)        ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
+                        (((unsigned long)REG_POISON) << 16) | (n))
 
 static inline void poison_regs(void)
 {
@@ -105,6 +106,20 @@ static void dump_regs(void)
        }
 }
 
+#ifdef _CALL_AIXDESC
+struct opd {
+       unsigned long ip;
+       unsigned long toc;
+       unsigned long env;
+};
+static struct opd bad_opd = {
+       .ip = BAD_NIP,
+};
+#define BAD_FUNC (&bad_opd)
+#else
+#define BAD_FUNC BAD_NIP
+#endif
+
 int test_wild_bctr(void)
 {
        int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)
 
                poison_regs();
 
-               func_ptr = (int (*)(void))BAD_NIP;
+               func_ptr = (int (*)(void))BAD_FUNC;
                func_ptr();
 
                FAIL_IF(1); /* we didn't segv? */