Merge tag 'icc-5.2-rc1' of https://git.linaro.org/people/georgi.djakov/linux into...
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Apr 2019 17:36:09 +0000 (19:36 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Apr 2019 17:36:09 +0000 (19:36 +0200)
Georgi writes:

interconnect: for 5.2

Here are some tiny patches for the 5.2-rc1 merge window:

- Add linux-pm@ as a mailing list for the interconnect API.
- Use DEFINE_SHOW_ATTRIBUTE macro to simplify the code.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
* tag 'icc-5.2-rc1' of https://git.linaro.org/people/georgi.djakov/linux:
  interconnect: convert to DEFINE_SHOW_ATTRIBUTE
  MAINTAINERS: Add mailing list for the interconnect API

945 files changed:
.clang-format
.mailmap
Documentation/ABI/testing/sysfs-class-mei
Documentation/accounting/psi.txt
Documentation/bpf/btf.rst
Documentation/devicetree/bindings/arm/cpus.yaml
Documentation/devicetree/bindings/hwmon/adc128d818.txt
Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt [new file with mode: 0644]
Documentation/lzo.txt
Documentation/media/uapi/rc/rc-tables.rst
Documentation/networking/bpf_flow_dissector.rst [new file with mode: 0644]
Documentation/networking/index.rst
Documentation/networking/rxrpc.txt
MAINTAINERS
Makefile
arch/arc/include/asm/syscall.h
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/rk3288-tinker.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sama5d2-pinfunc.h
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/include/asm/syscall.h
arch/arm/mach-at91/pm.c
arch/arm/mach-iop13xx/setup.c
arch/arm/mach-iop13xx/tpmi.c
arch/arm/mach-milbeaut/platsmp.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap2/display.c
arch/arm/plat-iop/adma.c
arch/arm/plat-orion/common.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/traps.c
arch/c6x/include/asm/syscall.h
arch/csky/include/asm/syscall.h
arch/h8300/include/asm/syscall.h
arch/hexagon/include/asm/syscall.h
arch/ia64/include/asm/syscall.h
arch/ia64/kernel/ptrace.c
arch/microblaze/include/asm/syscall.h
arch/mips/configs/generic/board-ocelot.config
arch/mips/include/asm/syscall.h
arch/mips/kernel/kgdb.c
arch/mips/kernel/ptrace.c
arch/mips/sgi-ip27/ip27-irq.c
arch/nds32/include/asm/syscall.h
arch/nios2/include/asm/syscall.h
arch/openrisc/include/asm/syscall.h
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/syscall.h
arch/parisc/kernel/process.c
arch/parisc/kernel/setup.c
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/syscall.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/riscv/configs/rv32_defconfig [new file with mode: 0644]
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/module.c
arch/riscv/kernel/setup.c
arch/riscv/mm/Makefile
arch/riscv/mm/init.c
arch/s390/boot/mem_detect.c
arch/s390/include/asm/syscall.h
arch/s390/kernel/fpu.c
arch/s390/kernel/vtime.c
arch/sh/boards/of-generic.c
arch/sh/include/asm/syscall_32.h
arch/sh/include/asm/syscall_64.h
arch/sparc/include/asm/syscall.h
arch/sparc/kernel/pci_sun4v.c
arch/um/include/asm/syscall-generic.h
arch/x86/Kconfig
arch/x86/crypto/poly1305-avx2-x86_64.S
arch/x86/crypto/poly1305-sse2-x86_64.S
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/syscall.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/vmx.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/dump_pagetables.c
arch/x86/mm/ioremap.c
arch/x86/mm/kaslr.c
arch/x86/mm/tlb.c
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/syscall.h
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/stacktrace.c
arch/xtensa/mm/mmu.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio.c
block/blk-core.c
block/blk-mq-sched.c
block/blk-mq.c
block/blk-mq.h
crypto/testmgr.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/intel.c
drivers/base/memory.c
drivers/block/null_blk_main.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/virtio_blk.c
drivers/block/xsysace.c
drivers/bluetooth/btusb.c
drivers/char/Kconfig
drivers/char/hpet.c
drivers/char/ipmi/ipmi_dmi.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_hardcode.c
drivers/char/tpm/eventlog/tpm2.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm-interface.c
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/pmc.h
drivers/clk/at91/sama5d2.c
drivers/clk/imx/clk-pll14xx.c
drivers/clk/mediatek/clk-gate.c
drivers/clk/meson/clk-pll.c
drivers/clk/meson/g12a.c
drivers/clk/meson/gxbb.c
drivers/clk/meson/vid-pll-div.c
drivers/clk/x86/clk-pmc-atom.c
drivers/clocksource/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/timer-oxnas-rps.c
drivers/clocksource/timer-ti-dm.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamhash.c
drivers/extcon/Kconfig
drivers/firmware/google/vpd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/icl_dsi.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_drm_gem.h
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hid-uclogic-params.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/occ/common.c
drivers/i2c/busses/i2c-imx.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/chemical/Kconfig
drivers/iio/chemical/bme680.h
drivers/iio/chemical/bme680_core.c
drivers/iio/chemical/bme680_i2c.c
drivers/iio/chemical/bme680_spi.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
drivers/iio/dac/mcp4725.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/input/keyboard/snvs_pwrkey.c
drivers/input/mouse/elan_i2c_core.c
drivers/iommu/amd_iommu_init.c
drivers/irqchip/irq-ls1x.c
drivers/isdn/mISDN/socket.c
drivers/lightnvm/pblk-read.c
drivers/md/dm-core.h
drivers/md/dm-init.c
drivers/md/dm-integrity.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/mfd/Kconfig
drivers/mfd/sprd-sc27xx-spi.c
drivers/mfd/twl-core.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/aspeed-p2a-ctrl.c [new file with mode: 0644]
drivers/misc/cardreader/rts5260.c
drivers/misc/fastrpc.c
drivers/misc/habanalabs/Makefile
drivers/misc/habanalabs/command_buffer.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/firmware_if.c [new file with mode: 0644]
drivers/misc/habanalabs/goya/Makefile
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/goya/goyaP.h
drivers/misc/habanalabs/goya/goya_coresight.c [new file with mode: 0644]
drivers/misc/habanalabs/goya/goya_security.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/habanalabs_drv.c
drivers/misc/habanalabs/habanalabs_ioctl.c
drivers/misc/habanalabs/include/armcp_if.h
drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h [new file with mode: 0644]
drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
drivers/misc/habanalabs/include/goya/goya.h
drivers/misc/habanalabs/include/goya/goya_async_events.h
drivers/misc/habanalabs/include/goya/goya_coresight.h [new file with mode: 0644]
drivers/misc/habanalabs/include/goya/goya_fw_if.h
drivers/misc/habanalabs/include/hl_boot_if.h
drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h [new file with mode: 0644]
drivers/misc/habanalabs/memory.c
drivers/misc/habanalabs/mmu.c
drivers/misc/habanalabs/pci.c [new file with mode: 0644]
drivers/misc/mei/Kconfig
drivers/misc/mei/Makefile
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/client.h
drivers/misc/mei/debugfs.c
drivers/misc/mei/dma-ring.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hbm.h
drivers/misc/mei/hdcp/Kconfig [new file with mode: 0644]
drivers/misc/mei/hdcp/Makefile
drivers/misc/mei/hdcp/mei_hdcp.c
drivers/misc/mei/hdcp/mei_hdcp.h
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/hw-txe-regs.h
drivers/misc/mei/hw-txe.c
drivers/misc/mei/hw-txe.h
drivers/misc/mei/hw.h
drivers/misc/mei/init.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/mei-trace.c
drivers/misc/mei/mei-trace.h
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/host/alcor.c
drivers/mmc/host/sdhci-omap.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/nfc/mei_phy.c
drivers/nfc/microread/mei.c
drivers/nfc/pn544/mei.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/discovery.c
drivers/nvme/target/nvmet.h
drivers/parisc/iosapic.c
drivers/parport/parport_cs.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/quirks.c
drivers/platform/x86/pmc_atom.c
drivers/power/supply/goldfish_battery.c
drivers/reset/reset-meson-audio-arb.c
drivers/rtc/Kconfig
drivers/rtc/rtc-cros-ec.c
drivers/rtc/rtc-da9063.c
drivers/rtc/rtc-sh.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/con3270.c
drivers/s390/char/fs3270.c
drivers/s390/char/raw3270.c
drivers/s390/char/raw3270.h
drivers/s390/char/tty3270.c
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/pkey_api.c
drivers/scsi/aic7xxx/aic7770_osm.c
drivers/scsi/aic7xxx/aic7xxx.h
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_lib.c
drivers/scsi/storvsc_drv.c
drivers/scsi/virtio_scsi.c
drivers/staging/comedi/drivers/ni_usb6501.c
drivers/staging/comedi/drivers/vmk80xx.c
drivers/staging/erofs/data.c
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/meter/ade7854.c
drivers/staging/most/core.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sh-sci.c
drivers/tty/vt/vt.c
drivers/uio/uio_fsl_elbc_gpcm.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/w1/masters/ds2482.c
drivers/xen/privcmd-buf.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/afs/server.c
fs/afs/write.c
fs/aio.c
fs/block_dev.c
fs/btrfs/ioctl.c
fs/btrfs/props.c
fs/char_dev.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/smb2file.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/dax.c
fs/debugfs/inode.c
fs/fuse/dev.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4file.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
fs/open.c
fs/pipe.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/read_write.c
fs/splice.c
fs/ubifs/super.c
fs/userfaultfd.c
include/asm-generic/syscall.h
include/drm/drm_modeset_helper_vtables.h
include/dt-bindings/clock/sifive-fu540-prci.h [new file with mode: 0644]
include/dt-bindings/reset/amlogic,meson-g12a-reset.h
include/keys/trusted.h
include/linux/bio.h
include/linux/bitrev.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bvec.h
include/linux/efi.h
include/linux/elevator.h
include/linux/fs.h
include/linux/kernel.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/mei_cl_bus.h
include/linux/memcontrol.h
include/linux/mii.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/nvme.h
include/linux/pipe_fs_i.h
include/linux/platform_data/x86/clk-pmc-atom.h
include/linux/ptrace.h
include/linux/sched/mm.h
include/linux/shmem_fs.h
include/linux/string.h
include/linux/sunrpc/sched.h
include/linux/virtio_ring.h
include/linux/vmw_vmci_defs.h
include/net/af_rxrpc.h
include/net/cfg80211.h
include/net/ip.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netns/hash.h
include/net/netrom.h
include/net/nfc/nci_core.h
include/net/sch_generic.h
include/net/sock.h
include/net/tls.h
include/sound/soc.h
include/trace/events/syscalls.h
include/uapi/linux/aspeed-p2a-ctrl.h [new file with mode: 0644]
include/uapi/linux/ethtool.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/mei.h
include/uapi/misc/habanalabs.h
include/uapi/sound/asound.h
init/main.c
kernel/bpf/cpumap.c
kernel/bpf/inode.c
kernel/bpf/verifier.c
kernel/dma/debug.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/chip.c
kernel/irq/irqdesc.c
kernel/kprobes.c
kernel/locking/lockdep.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/seccomp.c
kernel/signal.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/time/sched_clock.c
kernel/time/tick-common.c
kernel/time/timekeeping.h
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_syscalls.c
kernel/watchdog_hld.c
lib/Kconfig.debug
lib/iov_iter.c
lib/lzo/lzo1x_compress.c
lib/lzo/lzo1x_decompress_safe.c
lib/string.c
lib/syscall.c
mm/compaction.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/memcontrol.c
mm/mmap.c
mm/page_alloc.c
mm/percpu.c
mm/shmem.c
mm/slab.c
mm/swapfile.c
mm/util.c
mm/vmscan.c
mm/vmstat.c
net/8021q/vlan_dev.c
net/atm/lec.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/bluetooth/sco.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/failover.c
net/core/filter.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/feat.c
net/dsa/tag_qca.c
net/ipv4/fou.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/route.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/ila/ila_xlat.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/kcm/kcmsock.c
net/llc/af_llc.c
net/mac80211/driver-ops.h
net/mac80211/key.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/trace_msg.h
net/mac80211/tx.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_route.c
net/netrom/sysctl_net_netrom.c
net/nfc/nci/hci.c
net/openvswitch/flow_netlink.c
net/rds/af_rds.c
net/rds/bind.c
net/rds/tcp.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/peer_event.c
net/rxrpc/sendmsg.c
net/sched/act_sample.c
net/sched/cls_matchall.c
net/sched/sch_cake.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_taprio.c
net/sched/sch_tbf.c
net/sctp/protocol.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_close.c
net/smc/smc_close.h
net/smc/smc_ism.c
net/smc/smc_pnet.c
net/strparser/strparser.c
net/sunrpc/clnt.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/netlink_compat.c
net/tipc/sysctl.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
scripts/atomic/gen-atomics.sh
scripts/coccinelle/api/stream_open.cocci [new file with mode: 0644]
security/apparmor/lsm.c
security/device_cgroup.c
security/keys/trusted.c
sound/core/info.c
sound/core/init.c
sound/core/seq/seq_clientmgr.c
sound/hda/ext/hdac_ext_bus.c
sound/hda/hdac_bus.c
sound/hda/hdac_component.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/cs35l35.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/hdac_hda.h
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/nau8810.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic32x4-i2c.c
sound/soc/codecs/tlv320aic32x4-spi.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wm_adsp.h
sound/soc/fsl/fsl_asrc.c
sound/soc/fsl/fsl_esai.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/intel/skylake/skl-messages.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/mediatek/common/mtk-btcvsd.c
sound/soc/mediatek/mt8183/mt8183-afe-clk.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/samsung/i2s.c
sound/soc/samsung/odroid.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_i2s.c
sound/soc/stm/stm32_sai.c
sound/soc/stm/stm32_sai_sub.c
sound/xen/xen_snd_front_alsa.c
tools/include/uapi/sound/asound.h
tools/io_uring/io_uring-bench.c
tools/lib/bpf/Makefile
tools/lib/bpf/btf.c
tools/lib/traceevent/event-parse.c
tools/objtool/check.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/util/env.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/progs/bpf_flow.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/proc/proc-pid-vm.c
tools/testing/selftests/proc/proc-self-map-files-002.c
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

index f49620f506f17a95bda75dd4cbfd2a544ee0a8b4..f3923a1f98583bef70d7beeac5954da858079a3c 100644 (file)
@@ -78,6 +78,8 @@ ForEachMacros:
   - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
   - 'drm_for_each_legacy_plane'
   - 'drm_for_each_plane'
   - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
   - 'drm_mm_for_each_hole'
   - 'drm_mm_for_each_node'
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
   - 'for_each_endpoint_of_node'
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
   - 'for_each_net_rcu'
   - 'for_each_new_connector_in_state'
   - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
   - 'for_each_new_plane_in_state'
   - 'for_each_new_private_obj_in_state'
   - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
   - 'for_each_oldnew_plane_in_state'
   - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
   - 'fwnode_for_each_child_node'
   - 'fwnode_graph_for_each_endpoint'
   - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
   - 'hash_for_each'
   - 'hash_for_each_possible'
   - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
   - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_continue'
   - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
   - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
index b2cde8668dcc38f85c6f0f1bac9da5fa2e9f63b9..ae2bcad06f4b58eb3db18020df75cabeb59b6ed0 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
index 17d7444a239733c21e1af96ab3b9b4987d085bee..a92d844f806ebc107ac325f3540cdc88224b77f5 100644 (file)
@@ -65,3 +65,18 @@ Description: Display the ME firmware version.
                <platform>:<major>.<minor>.<milestone>.<build_no>.
                There can be up to three such blocks for different
                FW components.
+
+What:          /sys/class/mei/meiN/dev_state
+Date:          Mar 2019
+KernelVersion: 5.1
+Contact:       Tomas Winkler <tomas.winkler@intel.com>
+Description:   Display the ME device state.
+
+               The device state can have following values:
+               INITIALIZING
+               INIT_CLIENTS
+               ENABLED
+               RESETTING
+               DISABLED
+               POWER_DOWN
+               POWER_UP
index b8ca28b60215a48f1ee99cfae36f20e0b8d0e8da..7e71c9c1d8e9c7eee70ef957610b483a58739232 100644 (file)
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================
index 9a60a5d60e380ab2204334086a58ebef1f3b11e5..7313d354f20e6402e23161f24ddab227afb7c0a1 100644 (file)
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
index 365dcf384d73922a22bc70a32e98444122bc4fa7..82dd7582e945461efbdff77c8222bdc1e0e162b9 100644 (file)
@@ -228,7 +228,7 @@ patternProperties:
                 - renesas,r9a06g032-smp
                 - rockchip,rk3036-smp
                 - rockchip,rk3066-smp
-               - socionext,milbeaut-m10v-smp
+                - socionext,milbeaut-m10v-smp
                 - ste,dbx500-smp
 
       cpu-release-addr:
index 08bab0e94d25a21b8ed4d87738641e5ffe74bfa2..d0ae46d7bac370d9db369d9e471ef632be2482d9 100644 (file)
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode:     Operation mode (see above).
+ - ti,mode:     Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
        adc128d818@1d {
                compatible = "ti,adc128d818";
                reg = <0x1d>;
-               ti,mode = <2>;
+               ti,mode = /bits/ 8 <2>;
        };
diff --git a/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
new file mode 100644 (file)
index 0000000..854bd67
--- /dev/null
@@ -0,0 +1,47 @@
+======================================================================
+Device tree bindings for Aspeed AST2400/AST2500 PCI-to-AHB Bridge Control Driver
+======================================================================
+
+The bridge is available on platforms with the VGA enabled on the Aspeed device.
+In this case, the host has access to a 64KiB window into all of the BMC's
+memory.  The BMC can disable this bridge.  If the bridge is enabled, the host
+has read access to all the regions of memory, however the host only has read
+and write access depending on a register controlled by the BMC.
+
+Required properties:
+===================
+
+ - compatible: must be one of:
+       - "aspeed,ast2400-p2a-ctrl"
+       - "aspeed,ast2500-p2a-ctrl"
+
+Optional properties:
+===================
+
+- memory-region: A phandle to a reserved_memory region to be used for the PCI
+               to AHB mapping
+
+The p2a-control node should be the child of a syscon node with the required
+property:
+
+- compatible : Should be one of the following:
+               "aspeed,ast2400-scu", "syscon", "simple-mfd"
+               "aspeed,g4-scu", "syscon", "simple-mfd"
+               "aspeed,ast2500-scu", "syscon", "simple-mfd"
+               "aspeed,g5-scu", "syscon", "simple-mfd"
+
+Example
+===================
+
+g4 Example
+----------
+
+syscon: scu@1e6e2000 {
+       compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
+       reg = <0x1e6e2000 0x1a8>;
+
+       p2a: p2a-control {
+               compatible = "aspeed,ast2400-p2a-ctrl";
+               memory-region = <&reserved_memory>;
+       };
+};
index f79934225d8d35bd98d3ebd4dcc996324abd66b9..ca983328976bcf36e040ab7508eb29583ed5ca4e 100644 (file)
@@ -102,9 +102,11 @@ Byte sequences
                 dictionary which is empty, and that it will always be
                 invalid at this place.
 
-      17      : bitstream version. If the first byte is 17, the next byte
-                gives the bitstream version (version 1 only). If the first byte
-                is not 17, the bitstream version is 0.
+      17      : bitstream version. If the first byte is 17, and compressed
+                stream length is at least 5 bytes (length of shortest possible
+                versioned bitstream), the next byte gives the bitstream version
+                (version 1 only).
+                Otherwise, the bitstream version is 0.
 
       18..21  : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
index f460031d85313821ac91d940b915d26994d1ed00..177ac44fa0fac33363d34f840c663d4699039a79 100644 (file)
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 78
 
-       -  ``KEY_SCREEN``
+       -  ``KEY_ASPECT_RATIO``
 
        -  Select screen aspect ratio
 
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 79
 
-       -  ``KEY_ZOOM``
+       -  ``KEY_FULL_SCREEN``
 
        -  Put device into zoom/full screen mode
 
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644 (file)
index 0000000..b375ae2
--- /dev/null
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+Flow dissector is a routine that parses metadata out of the packets. It's
+used in the various places in the networking subsystem (RFS, flow hash, etc).
+
+BPF flow dissector is an attempt to reimplement C-based flow dissector logic
+in BPF to gain all the benefits of BPF verifier (namely, limits on the
+number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only the
+limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+  * ``nhoff`` - initial offset of the networking header
+  * ``thoff`` - initial offset of the transport header, initialized to nhoff
+  * ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+Flow dissector BPF program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. Input arguments ``nhoff/thoff/n_proto`` should be
+also adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate parsing error.
+
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+  +------+------+------------+-----------+
+  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+  +------+------+------------+-----------+
+                              ^
+                              |
+                              +-- flow dissector starts here
+
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In case of VLAN, flow dissector can be called with the two different states.
+
+Pre-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                        ^
+                        |
+                        +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of TCI
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = TPID
+
+Please note that TPID can be 802.1AD and, hence, BPF program would
+have to parse VLAN information twice for double tagged packets.
+
+
+Post-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                                          ^
+                                          |
+                                          +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: BPF flow dissector program can be called with
+the optional VLAN header and should gracefully handle both cases: when single
+or double VLAN is present and when it is not present. The same program
+can be called for both cases and would have to be written carefully to
+handle both cases.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load BPF flow dissector program as well.
+
+The reference implementation is organized as follows:
+  * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+  * ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and
+    does ``bpf_tail_call`` to the appropriate L3 handler
+
+Since BPF at this point doesn't support looping (or any jumping back),
+jmp_table is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
+
+
+Current Limitations
+===================
+BPF flow dissector doesn't support exporting all the metadata that in-kernel
+C-based implementation can export. Notable example is single VLAN (802.1Q)
+and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys``
+for the set of information that can currently be exported from the BPF context.
index 5449149be496fa8448fa5b74bafe2c5c796cb06d..984e68f9e0269507132846517a4c4c2b8d726216 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    netdev-FAQ
    af_xdp
    batman-adv
+   bpf_flow_dissector
    can
    can_ucan_protocol
    device_drivers/freescale/dpaa2/index
index 2df5894353d6954f5c0dd26d2c149c6e9ee6ee4c..cd7303d7fa25dac9ae38d0e73186f3687b7872a7 100644 (file)
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-       u32 rxrpc_kernel_check_life(struct socket *sock,
-                                   struct rxrpc_call *call);
+       bool rxrpc_kernel_check_life(struct socket *sock,
+                                    struct rxrpc_call *call,
+                                    u32 *_life);
        void rxrpc_kernel_probe_life(struct socket *sock,
                                     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server while waiting for the server to
index c6b36645a77b5c14c532f4e450f14349472d8e9c..b8d72ee0754875f49d8383178d08ccbb7a3d6c45 100644 (file)
@@ -1893,14 +1893,15 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 ARM/NUVOTON NPCM ARCHITECTURE
 M:     Avi Fishman <avifishman70@gmail.com>
 M:     Tomer Maimon <tmaimon77@gmail.com>
+M:     Tali Perry <tali.perry1@gmail.com>
 R:     Patrick Venture <venture@google.com>
 R:     Nancy Yuen <yuenn@google.com>
-R:     Brendan Higgins <brendanhiggins@google.com>
+R:     Benjamin Fair <benjaminfair@google.com>
 L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/arm/mach-npcm/
 F:     arch/arm/boot/dts/nuvoton-npcm*
-F:     include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F:     include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F:     drivers/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*/*npcm*
@@ -4129,7 +4130,7 @@ F:        drivers/cpuidle/*
 F:     include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M:     Nicolas Pitre <nico@linaro.org>
+M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
 F:     Documentation/filesystems/cramfs.txt
 F:     fs/cramfs/
@@ -5833,7 +5834,7 @@ L:        netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-mdio
 F:     Documentation/devicetree/bindings/net/mdio*
-F:     Documentation/networking/phy.txt
+F:     Documentation/networking/phy.rst
 F:     drivers/net/phy/
 F:     drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
@@ -7332,7 +7333,6 @@ F:        Documentation/devicetree/bindings/i3c/
 F:     Documentation/driver-api/i3c
 F:     drivers/i3c/
 F:     include/linux/i3c/
-F:     include/dt-bindings/i3c/
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Vitor Soares <vitor.soares@synopsys.com>
@@ -7515,7 +7515,7 @@ F:        include/net/mac802154.h
 F:     include/net/af_ieee802154.h
 F:     include/net/cfg802154.h
 F:     include/net/ieee802154_netdev.h
-F:     Documentation/networking/ieee802154.txt
+F:     Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M:     Yotam Gigi <yotam.gi@gmail.com>
@@ -10145,7 +10145,7 @@ F:      drivers/spi/spi-at91-usart.c
 F:     Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:     Woojung Huh <Woojung.Huh@microchip.com>
+M:     Woojung Huh <woojung.huh@microchip.com>
 M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -13982,7 +13982,7 @@ F:      drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M:     Edward Cree <ecree@solarflare.com>
-M:     Bert Kenward <bkenward@solarflare.com>
+M:     Martin Habets <mhabets@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
@@ -16509,7 +16509,7 @@ F:      drivers/char/virtio_console.c
 F:     include/linux/virtio_console.h
 F:     include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 L:     virtualization@lists.linux-foundation.org
@@ -16524,6 +16524,19 @@ F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 F:     mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+M:     Jason Wang <jasowang@redhat.com>
+R:     Paolo Bonzini <pbonzini@redhat.com>
+R:     Stefan Hajnoczi <stefanha@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/block/virtio_blk.c
+F:     drivers/scsi/virtio_scsi.c
+F:     include/uapi/linux/virtio_blk.h
+F:     include/uapi/linux/virtio_scsi.h
+F:     drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
 L:     virtualization@lists.linux-foundation.org
index 026fbc450906ad0aba4bf587c880c5c51ed404f8..abe13538a8c04af16e30424282ecebe449a0d88f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
index 29de098043064a20112dd7ffd02f86059261ae44..c7a4201ed62ba70f9f37275475be215b0e7fb6d1 100644 (file)
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
        unsigned long *inside_ptregs = &(regs->r0);
-       inside_ptregs -= i;
-
-       BUG_ON((i + n) > 6);
+       unsigned int n = 6;
+       unsigned int i = 0;
 
        while (n--) {
                args[i++] = (*inside_ptregs);
index dce5be5df97bd91abe3ff039e8befab58656124b..edcff79879e780e5aa307dfc0d18f393663a7f78 100644 (file)
                enable-active-high;
        };
 
+       /* TPS79501 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS79501 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        matrix_keypad: matrix_keypad0 {
                compatible = "gpio-matrix-keypad";
                debounce-delay-ms = <5>;
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index b128998097ce7180cb2a72291bb83ea2a19d0f52..2c2d8b5b8cf52bf55b28b20a47488363c895681c 100644 (file)
                enable-active-high;
        };
 
+       /* TPS79518 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS78633 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        leds {
                pinctrl-names = "default";
                pinctrl-0 = <&user_leds_s0>;
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index f459ec316a22d4cd723d43dd97d3709106aacedd..ca6d9f02a800c8a0e042d43280fa762ebce6fef4 100644 (file)
                        reg = <0xcc000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        reg = <0xd0000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
index aa107ee41b8b8f3fbc13b92676224561fe0f92c0..ef653c3209bcc995aaa5d5cd79d0b3cf3fd0f8a0 100644 (file)
                        };
 
                        vccio_sd: LDO_REG5 {
+                               regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-name = "vccio_sd";
        bus-width = <4>;
        cap-mmc-highspeed;
        cap-sd-highspeed;
-       card-detect-delay = <200>;
+       broken-cd;
        disable-wp;                     /* wp not hooked up */
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
index 0bc2409f6903ffec1512942e9e657d9d983a2c35..192dbc089ade1730b9dce6bca8d356f3b0c83a00 100644 (file)
@@ -25,8 +25,6 @@
 
        gpio_keys: gpio-keys {
                compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&pwr_key_l>;
index ca7d52daa8fb638641e3b90f633bcf1c1a1e5497..a024d1e7e74cd94eade3e5c00ed9d56353ef93aa 100644 (file)
@@ -70,7 +70,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x501>;
                        resets = <&cru SRST_CORE1>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x502>;
                        resets = <&cru SRST_CORE2>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x503>;
                        resets = <&cru SRST_CORE3>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
                clock-names = "ref", "pclk";
                power-domains = <&power RK3288_PD_VIO>;
                rockchip,grf = <&grf>;
-               #address-cells = <1>;
-               #size-cells = <0>;
                status = "disabled";
 
                ports {
        gpu_opp_table: gpu-opp-table {
                compatible = "operating-points-v2";
 
-               opp@100000000 {
+               opp-100000000 {
                        opp-hz = /bits/ 64 <100000000>;
                        opp-microvolt = <950000>;
                };
-               opp@200000000 {
+               opp-200000000 {
                        opp-hz = /bits/ 64 <200000000>;
                        opp-microvolt = <950000>;
                };
-               opp@300000000 {
+               opp-300000000 {
                        opp-hz = /bits/ 64 <300000000>;
                        opp-microvolt = <1000000>;
                };
-               opp@400000000 {
+               opp-400000000 {
                        opp-hz = /bits/ 64 <400000000>;
                        opp-microvolt = <1100000>;
                };
-               opp@500000000 {
+               opp-500000000 {
                        opp-hz = /bits/ 64 <500000000>;
                        opp-microvolt = <1200000>;
                };
-               opp@600000000 {
+               opp-600000000 {
                        opp-hz = /bits/ 64 <600000000>;
                        opp-microvolt = <1250000>;
                };
index 1c01a6f843d8a43c07ab25dd18ffb57acd044c0b..28a2e45752fea34eb2efb576439409c0611d9d90 100644 (file)
 #define PIN_PC9__GPIO                  PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ                   PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP              PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4                 PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10                       74
 #define PIN_PC10__GPIO                 PINMUX_PIN(PIN_PC10, 0, 0)
index f2f6558a00f188937ca55ee5f44e689da5e1bf7a..04066f9cb8a31c643cba82ea337dd22cb7a85626 100644 (file)
                gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
                gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
                /*
-                * This chipselect is active high. Just setting the flags
-                * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
-                * it will be ignored, only the special "spi-cs-high" flag
-                * really counts.
+                * It's not actually active high, but the frameworks assume
+                * the polarity of the passed-in GPIO is "normal" (active
+                * high) then actively drives the line low to select the
+                * chip.
                 */
                cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
-               spi-cs-high;
                num-chipselects = <1>;
 
                /*
index 06dea6bce293b934e1146d26aa316ea8e36e80b5..080ce70cab12a6944af4120ed5a3b9ca9889411d 100644 (file)
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               args[0] = regs->ARM_ORIG_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+       args[0] = regs->ARM_ORIG_r0;
+       args++;
+
+       memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->ARM_ORIG_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+       regs->ARM_ORIG_r0 = args[0];
+       args++;
+
+       memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 51e808adb00cc23576b1f9148f0388542627d397..2a757dcaa1a5e9d63b5ae47833ef31d12ab94aa2 100644 (file)
@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
 
        np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
        if (!np)
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
 
        pdev = of_find_device_by_node(np);
        of_node_put(np);
        if (!pdev) {
                pr_warn("%s: failed to find securam device!\n", __func__);
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
        }
 
        sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
        return 0;
 
 securam_fail:
+       put_device(&pdev->dev);
+securam_fail_no_ref_dev:
        iounmap(pm_data.sfrbu);
        pm_data.sfrbu = NULL;
        return ret;
index 53c316f7301e69fcbebbfe5d73bb48664180f5b6..fe4932fda01d7d0bc819c0ca4e6dcedb6b061081 100644 (file)
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
        }
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
        .hw_id = 0,
        .pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
        .resource = iop13xx_adma_0_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_0_data,
        },
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
        .resource = iop13xx_adma_1_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_1_data,
        },
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
        .resource = iop13xx_adma_2_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_2_data,
        },
 };
index db511ec2b1df6824cb6d3d24659cfebe2428d5ec..116feb6b261eb7b0e08ee7ce248e44682e537898 100644 (file)
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
        }
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
        .name = "iop-tpmi",
        .id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
        .resource = iop13xx_tpmi_0_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
        .resource = iop13xx_tpmi_1_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
        .resource = iop13xx_tpmi_2_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
        .resource = iop13xx_tpmi_3_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 591543c81399b4f976b52599cf84b7cb22e512ea..3ea880f5fcb7338b8e419db9fd49c06cee263ca0 100644 (file)
@@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
                writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void m10v_cpu_die(unsigned int l_cpu)
 {
        gic_cpu_if_down(0);
@@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
 
        return 1;
 }
+#endif
 
 static struct smp_operations m10v_smp_ops __initdata = {
        .smp_prepare_cpus       = m10v_smp_init,
        .smp_boot_secondary     = m10v_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = m10v_cpu_die,
        .cpu_kill               = m10v_cpu_kill,
+#endif
 };
 CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
 
index be30c3c061b46ee0c1adf3ce55a872eb7bc9c9c0..1b15d593837ed78ea22298ccc4ae60cb3de166f1 100644 (file)
@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
 
 static struct bgpio_pdata latch1_pdata = {
        .label  = LATCH1_LABEL,
+       .base   = -1,
        .ngpio  = LATCH1_NGPIO,
 };
 
@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
 
 static struct bgpio_pdata latch2_pdata = {
        .label  = LATCH2_LABEL,
+       .base   = -1,
        .ngpio  = LATCH2_NGPIO,
 };
 
index 1444b4b4bd9f85e54368c0e18ac31f3f2fc033eb..439e143cad7b5d4d8ef48122816f9acf436570c3 100644 (file)
@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
        if (!node)
                return 0;
 
-       if (!of_device_is_available(node))
+       if (!of_device_is_available(node)) {
+               of_node_put(node);
                return 0;
+       }
 
        pdev = of_find_device_by_node(node);
 
index a4d1f8de3b5b23453ee4738723164a5ba8405424..d9612221e4848971f4ea27cf4f5d4c319073e439 100644 (file)
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
        .resource = iop3xx_dma_0_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_0_data,
        },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
        .resource = iop3xx_dma_1_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_1_data,
        },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
        .resource = iop3xx_aau_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_aau_data,
        },
 };
index a6c81ce00f520625880c29c083b3f70384c3db1f..8647cb80a93bd222234f4951f2249ac0399ca025 100644 (file)
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
        .resource       = orion_xor0_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor0_pdata,
        },
 };
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
        .resource       = orion_xor1_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor1_pdata,
        },
 };
index 7c649f6b14cb6eb73ea8fb23ded74ed5152d3a70..cd7c76e58b09a60f75ccd510083bab730a2378b0 100644 (file)
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 1>;
+                       altr,sysmgr-syscon = <&sysmgr 0x44 0>;
                        status = "disabled";
                };
 
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 2>;
+                       altr,sysmgr-syscon = <&sysmgr 0x48 0>;
                        status = "disabled";
                };
 
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 3>;
+                       altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
                        status = "disabled";
                };
 
index 33c44e857247e4a64847f22945c4c06bb7735ecf..0e34354b20927698482fddaf6814483394a18b93 100644 (file)
        snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
        snps,reset-delays-us = <0 10000 50000>;
-       tx_delay = <0x25>;
-       rx_delay = <0x11>;
+       tx_delay = <0x24>;
+       rx_delay = <0x18>;
        status = "okay";
 };
 
index 2157a528276bffae23afbaf3152db66292b7817a..79b4d1d4b5d6b67672dcbab1de19d274cecd5c5b 100644 (file)
@@ -46,8 +46,7 @@
 
        vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
                compatible = "regulator-fixed";
-               enable-active-high;
-               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&usb20_host_drv>;
                regulator-name = "vcc_host1_5v";
index 84f14b132e8f5fb80bf3f178a72f5e138d144bd3..dabef1a21649ba44ee4b880d83d9b24591ac1d9d 100644 (file)
 
                sdmmc0 {
                        sdmmc0_clk: sdmmc0-clk {
-                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
                        };
 
                        sdmmc0_cmd: sdmmc0-cmd {
-                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_dectn: sdmmc0-dectn {
                        };
 
                        sdmmc0_bus1: sdmmc0-bus1 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_bus4: sdmmc0-bus4 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA1 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA2 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA3 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA1 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA2 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA3 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_gpio: sdmmc0-gpio {
                        rgmiim1_pins: rgmiim1-pins {
                                rockchip,pins =
                                        /* mac_txclk */
-                                       <1 RK_PB4 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB4 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxclk */
-                                       <1 RK_PB5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB5 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdio */
-                                       <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txen */
-                                       <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PD1 2 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC5 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxdv */
-                                       <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC6 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdc */
-                                       <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC7 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd1 */
-                                       <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB2 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd0 */
-                                       <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB1 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxd3 */
-                                       <1 RK_PB6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB6 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd2 */
-                                       <1 RK_PB7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB7 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd3 */
-                                       <1 RK_PC0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <1 RK_PC1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC1 2 &pcfg_pull_none_8ma>,
 
                                        /* mac_txclk */
-                                       <0 RK_PB0 1 &pcfg_pull_none>,
+                                       <0 RK_PB0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txen */
-                                       <0 RK_PB4 1 &pcfg_pull_none>,
+                                       <0 RK_PB4 1 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <0 RK_PD0 1 &pcfg_pull_none>,
+                                       <0 RK_PD0 1 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <0 RK_PC0 1 &pcfg_pull_none>,
+                                       <0 RK_PC0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <0 RK_PC1 1 &pcfg_pull_none>,
+                                       <0 RK_PC1 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd3 */
-                                       <0 RK_PC7 1 &pcfg_pull_none>,
+                                       <0 RK_PC7 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <0 RK_PC6 1 &pcfg_pull_none>;
+                                       <0 RK_PC6 1 &pcfg_pull_none_8ma>;
                        };
 
                        rmiim1_pins: rmiim1-pins {
index 4a543f2117d4212b9e26578a64db9ad982ff5c59..844eac939a97c58f9aea4a2e681b39dd6648f4f1 100644 (file)
 };
 
 &hdmi {
+       ddc-i2c-bus = <&i2c3>;
        pinctrl-names = "default";
        pinctrl-0 = <&hdmi_cec>;
        status = "okay";
index cccb83ad7fa8ea2e1f4251dd724edc62c754771b..c7e1a7837706c17eeffd96edd17bcc4da0009af2 100644 (file)
@@ -30,8 +30,8 @@ do {                                                                  \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
-"2:    stlxr   %w3, %w0, %2\n"                                         \
-"      cbnz    %w3, 1b\n"                                              \
+"2:    stlxr   %w0, %w3, %2\n"                                         \
+"      cbnz    %w0, 1b\n"                                              \
 "      dmb     ish\n"                                                  \
 "3:\n"                                                                 \
 "      .pushsection .fixup,\"ax\"\n"                                   \
@@ -57,23 +57,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("mov  %w0, %w4",
+               __futex_atomic_op("mov  %w3, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op("add  %w0, %w1, %w4",
+               __futex_atomic_op("add  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op("orr  %w0, %w1, %w4",
+               __futex_atomic_op("orr  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ANDN:
-               __futex_atomic_op("and  %w0, %w1, %w4",
+               __futex_atomic_op("and  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, ~oparg);
                break;
        case FUTEX_OP_XOR:
-               __futex_atomic_op("eor  %w0, %w1, %w4",
+               __futex_atomic_op("eor  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        default:
index 905e1bb0e7bd023b7174da7a6a81459df87b40dc..cd9f4e9d04d3be6564843e821b2a612642717210 100644 (file)
@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+       return e->adrp || e->add || e->br;
+}
+
 #endif /* __ASM_MODULE_H */
index ad8be16a39c9d18bdbd406f522c02432529c4cf6..a179df3674a1aa207dfdead37e47219353b67b91 100644 (file)
@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_x0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+       args[0] = regs->orig_x0;
+       args++;
+
+       memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_x0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+       regs->orig_x0 = args[0];
+       args++;
+
+       memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
 }
 
 /*
index 8e4431a8821f5920e49910dd287db5882e73f330..07b298120182042d2a1dea18160ef63e5a678b9d 100644 (file)
@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
                if (!plt_entries_equal(mod->arch.ftrace_trampoline,
                                       &trampoline)) {
-                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                              &(struct plt_entry){})) {
+                       if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
index 5ba4465e44f09028c89fb190b7d65927635a9d10..ea94cf8f9dc6d15f58a7c8e298eba6d8bfdecede 100644 (file)
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
 
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
 
index 8ad119c3f665d4e8001038ccf3bd6dcb62e2e224..29755989f616c187481803b27ca9dbfcf0a7847b 100644 (file)
@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
        struct stackframe frame;
-       int skip;
+       int skip = 0;
 
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
+       if (regs) {
+               if (user_mode(regs))
+                       return;
+               skip = 1;
+       }
+
        if (!tsk)
                tsk = current;
 
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        frame.graph = 0;
 #endif
 
-       skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
                return ret;
 
        print_modules();
-       __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));
+       show_regs(regs);
 
-       if (!user_mode(regs)) {
-               dump_backtrace(regs, tsk);
+       if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);
-       }
 
        return ret;
 }
index ae2be315ee9c98d8440f4409be8b67cc261f8858..15ba8599858e6be5a860e23f338c8bd292da7207 100644 (file)
@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->a4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->b4;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->a6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->b6;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->a8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->b8;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->a4;
+       *args++ = regs->b4;
+       *args++ = regs->a6;
+       *args++ = regs->b6;
+       *args++ = regs->a8;
+       *args   = regs->b8;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->a4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->b4 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->a6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->b6 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->a8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->a9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->a4 = *args++;
+       regs->b4 = *args++;
+       regs->a6 = *args++;
+       regs->b6 = *args++;
+       regs->a8 = *args++;
+       regs->a9 = *args;
 }
 
 #endif /* __ASM_C6X_SYSCALLS_H */
index d637445737b78fd5c78c9994173a1e7c73eb3d1f..bda0a446c63ead759d9360d769fe68647a28f83d 100644 (file)
@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               regs->orig_a0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int
index 924990401237126585ea8fd105e4b57e8f9e5b24..ddd483c6ca95c9df50e9ed7b8c820c9884afcbeb 100644 (file)
@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       while (n > 0) {
-               switch (i) {
-               case 0:
-                       *args++ = regs->er1;
-                       break;
-               case 1:
-                       *args++ = regs->er2;
-                       break;
-               case 2:
-                       *args++ = regs->er3;
-                       break;
-               case 3:
-                       *args++ = regs->er4;
-                       break;
-               case 4:
-                       *args++ = regs->er5;
-                       break;
-               case 5:
-                       *args++ = regs->er6;
-                       break;
-               }
-               i++;
-               n--;
-       }
+       *args++ = regs->er1;
+       *args++ = regs->er2;
+       *args++ = regs->er3;
+       *args++ = regs->er4;
+       *args++ = regs->er5;
+       *args   = regs->er6;
 }
 
 
index 4af9c7b6f13af9490e4bee7b9f608c7d467cecb3..ae3a1e24fabd7193ff7d3dc142c4ca7d123f56e6 100644 (file)
@@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
+       memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
 }
 #endif
index 1d0b875fec44fc0fd8a70876e8ee6191f66f6005..0d9e7fab4a79fddcc63d24c30f002e73d2b91db0 100644 (file)
@@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 extern void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw);
+       struct pt_regs *regs, unsigned long *args, int rw);
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+       ia64_syscall_get_set_arguments(task, regs, args, 0);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+       ia64_syscall_get_set_arguments(task, regs, args, 1);
 }
 
 static inline int syscall_get_arch(void)
index 6d50ede0ed691ca1899540722e65edb3cf896510..bf9c24d9ce84e66d1519ce7e5aa65330628d221b 100644 (file)
@@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
 }
 
 void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw)
+       struct pt_regs *regs, unsigned long *args, int rw)
 {
        struct syscall_get_set_args data = {
-               .i = i,
-               .n = n,
+               .i = 0,
+               .n = 6,
                .args = args,
                .regs = regs,
                .rw = rw,
index 220decd605a4aded46a99b445e54bf27c4adc821..833d3a53dab30182b586dd364cd323d1db07835a 100644 (file)
@@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                *args++ = microblaze_get_syscall_arg(regs, i++);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                microblaze_set_syscall_arg(regs, i++, *args++);
 }
index f607888d24838be1c7d1ecd0fe7c20736b7d2ed1..184eb65a6ba71a5bea1e6b39cbe5d389a9764416 100644 (file)
@@ -1,6 +1,10 @@
 # require CONFIG_CPU_MIPS32_R2=y
 
 CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
 
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
@@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 
 CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
 CONFIG_MSCC_OCELOT_SWITCH=y
 CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
 CONFIG_MDIO_MSCC_MIIM=y
@@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
 CONFIG_SPI_DW_MMIO=y
 CONFIG_SPI_SPIDEV=y
 
+CONFIG_PINCTRL_OCELOT=y
+
 CONFIG_GPIO_SYSFS=y
 
 CONFIG_POWER_RESET=y
index 6cf8ffb5367ec3fb725aac26c701d0ae5d81923c..a2b4748655df4d1466d037b971855c8046c589e8 100644 (file)
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
        int ret;
 
        /* O32 ABI syscall() */
index 6e574c02e4c3b81137618c97fe9bc176c5a40d52..ea781b29f7f17291d90391c87a8a250772d17a37 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>
 
 static struct hard_trap_info {
        unsigned char tt;       /* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
        old_fs = get_fs();
        set_fs(KERNEL_DS);
 
-       kgdb_nmicallback(raw_smp_processor_id(), NULL);
+       kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
 
        set_fs(old_fs);
 }
index 0057c910bc2f34de0f518c43d2e234c845db0da1..3a62f80958e170527a93f4058d60f5372d5773ee 100644 (file)
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 
                sd.nr = syscall;
                sd.arch = syscall_get_arch();
-               syscall_get_arguments(current, regs, 0, 6, args);
+               syscall_get_arguments(current, regs, args);
                for (i = 0; i < 6; i++)
                        sd.args[i] = args[i];
                sd.instruction_pointer = KSTK_EIP(current);
index 710a59764b01c164d3ffae92f18a394224bdc153..a32f843cdbe02299e34bf7f0897ad61f6e23dce5 100644 (file)
@@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
 {
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        struct bridge_controller *bc;
-       int pin = hd->pin;
 
        if (!hd)
                return;
@@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
        disable_hub_irq(d);
 
        bc = hd->bc;
-       bridge_clr(bc, b_int_enable, (1 << pin));
+       bridge_clr(bc, b_int_enable, (1 << hd->pin));
        bridge_read(bc, b_wid_tflush);
 }
 
index f7e5e86765fe8efe51283d7c350a6a4ab8b73863..671ebd357496c4e1608b240d2ba23c4d3d9efa42 100644 (file)
@@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call (from 0 through 5). The first
+ * argument is stored in @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 #define SYSCALL_MAX_ARGS 6
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args)
+                          unsigned long *args)
 {
-       if (n == 0)
-               return;
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
+       args[0] = regs->orig_r0;
+       args++;
+       memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
 }
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call. The first argument gets value
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
+       regs->orig_r0 = args[0];
+       args++;
 
-       memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
+       memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
 }
 #endif /* _ASM_NDS32_SYSCALL_H */
index 9de220854c4ad88f43ea579cbcf51c250cb6e688..d7624ed06efb6c9ea2e616c23cd20b030a53b1c8 100644 (file)
@@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args)
+       struct pt_regs *regs, unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->r4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->r5;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->r6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->r7;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->r8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->r9;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->r4;
+       *args++ = regs->r5;
+       *args++ = regs->r6;
+       *args++ = regs->r7;
+       *args++ = regs->r8;
+       *args   = regs->r9;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       const unsigned long *args)
+       struct pt_regs *regs, const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->r4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->r5 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->r6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->r7 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->r8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->r9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->r4 = *args++;
+       regs->r5 = *args++;
+       regs->r6 = *args++;
+       regs->r7 = *args++;
+       regs->r8 = *args++;
+       regs->r9 = *args;
 }
 
 #endif
index 2db9f1cf0694c0f2c6bdaec77953f62fb4fe6372..b4ff07c1baed5d13c9abb0d025a1ece11ee78ad0 100644 (file)
@@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 2a27b275ab092cc60b3d003250aaaf647aa9c916..9ff033d261ab381c9e356fea458d768170f9effc 100644 (file)
@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->gr[20];
+       return regs->gr[28];
 }
 
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                                unsigned long val)
 {
-        regs->iaoq[0] = val;
+       regs->iaoq[0] = val;
+       regs->iaoq[1] = val + 4;
 }
 
 /* Query offset/name of register from its name/offset */
index 8bff1a58c97f1b107dabf79e172f5ecb56c5db2d..62a6d477fae0197cdba9d62044e31104f1b05192 100644 (file)
@@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
 }
 
 static inline void syscall_get_arguments(struct task_struct *tsk,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       BUG_ON(i);
-
-       switch (n) {
-       case 6:
-               args[5] = regs->gr[21];
-       case 5:
-               args[4] = regs->gr[22];
-       case 4:
-               args[3] = regs->gr[23];
-       case 3:
-               args[2] = regs->gr[24];
-       case 2:
-               args[1] = regs->gr[25];
-       case 1:
-               args[0] = regs->gr[26];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->gr[21];
+       args[4] = regs->gr[22];
+       args[3] = regs->gr[23];
+       args[2] = regs->gr[24];
+       args[1] = regs->gr[25];
+       args[0] = regs->gr[26];
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
index eb39e7e380d7e27b24f6bae39ae0e6c3583511e3..841db71958cdb50dff183dd058a9b09a5ec81421 100644 (file)
@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
 
 static int __init parisc_idle_init(void)
 {
-       const char *marker;
-
-       /* check QEMU/SeaBIOS marker in PAGE0 */
-       marker = (char *) &PAGE0->pad0;
-       running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
        if (!running_on_qemu)
                cpu_idle_poll_ctrl(1);
 
index 15dd9e21be7eac6d1fcf37d67f72de9b227bfa75..d908058d05c10bf4880e361070c30a42b668fd7e 100644 (file)
@@ -397,6 +397,9 @@ void __init start_parisc(void)
        int ret, cpunum;
        struct pdc_coproc_cfg coproc_cfg;
 
+       /* check QEMU/SeaBIOS marker in PAGE0 */
+       running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
        cpunum = smp_processor_id();
 
        init_cpu_topology();
index 598cdcdd13553dea4a80a9b72196dbee8987cd61..8ddd4a91bdc1e2fe9a2e4a617b32cc0e6e15e572 100644 (file)
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
 #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
        defined (CONFIG_PPC_64K_PAGES)
 #define MAX_PHYSMEM_BITS        51
-#elif defined(CONFIG_SPARSEMEM)
+#elif defined(CONFIG_PPC64)
 #define MAX_PHYSMEM_BITS        46
 #endif
 
index 1a0e7a8b1c811cf5d089c5ac68eb96d189ad702d..1243045bad2d633d4bd2df3d3e086bd2988987d0 100644 (file)
@@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long val, mask = -1UL;
-
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
 
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_32BIT))
                mask = 0xffffffff;
 #endif
        while (n--) {
-               if (n == 0 && i == 0)
+               if (n == 0)
                        val = regs->orig_gpr3;
                else
-                       val = regs->gpr[3 + i + n];
+                       val = regs->gpr[3 + n];
 
                args[n] = val & mask;
        }
@@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 
        /* Also copy the first argument into orig_gpr3 */
-       if (i == 0 && n > 0)
-               regs->orig_gpr3 = args[0];
+       regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
index a5b8fbae56a03b491f0982562f3d590cff16ca5f..9481a117e24255173231ac687c9e99b730bff420 100644 (file)
@@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
        ld      r4,PACA_EXSLB+EX_DAR(r13)
        std     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
@@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
        EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
        ld      r4,_NIP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
index 48051c8977c5603a1ac9f8b730c0283ab04497d8..e25b615e9f9e642d34e9387aac7db652131a466f 100644 (file)
@@ -851,10 +851,6 @@ __secondary_start:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* phys address of our thread_struct */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
        lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
        ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
        mtspr   SPRN_SPRG_PGDIR, r4
@@ -941,10 +937,6 @@ start_here:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
        lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
        ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
        mtspr   SPRN_SPRG_PGDIR, r4
index 683b5b3805bd17493d97c261afc19279ac76b69f..cd381e2291dfeb38a569fed214778838cef42a2e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+       /*
+        * Inform kmemleak about the hole in the .bss section since the
+        * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+        */
+       kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+                          ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
index 1e0bc5955a400601b106949f14c7a0ca64d1a6a6..afd516b572f8637447315ec882c08189bcf2fb4d 100644 (file)
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * can be used, r7 contains NSEC_PER_SEC.
         */
 
-       lwz     r5,WTOM_CLOCK_SEC(r9)
+       lwz     r5,(WTOM_CLOCK_SEC+LOPART)(r9)
        lwz     r6,WTOM_CLOCK_NSEC(r9)
 
        /* We now have our offset in r5,r6. We create a fake dependency
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644 (file)
index 0000000..1a911ed
--- /dev/null
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
index 57afe604b495bef44894b5088517c103376684d4..c207f6634b91c4ecc8f60b759c82056dd5624ed4 100644 (file)
@@ -26,7 +26,7 @@ enum fixed_addresses {
 };
 
 #define FIXADDR_SIZE           (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP            (PAGE_OFFSET)
+#define FIXADDR_TOP            (VMALLOC_START)
 #define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
 
 #define FIXMAP_PAGE_IO         PAGE_KERNEL
index bba3da6ef1572f41db64e59ca203ae32b9139180..a3d5273ded7c6d0782356f01abf7d5cdca753bc5 100644 (file)
@@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-        if (i == 0) {
-                regs->orig_a0 = args[0];
-                args++;
-                i++;
-                n--;
-        }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int syscall_get_arch(void)
index a00168b980d2e6ca265ae0424045508275fbbe3f..fb53a8089e769473434493d59bc408079dcbb519 100644 (file)
@@ -300,7 +300,7 @@ do {                                                                \
                "       .balign 4\n"                            \
                "4:\n"                                          \
                "       li %0, %6\n"                            \
-               "       jump 2b, %1\n"                          \
+               "       jump 3b, %1\n"                          \
                "       .previous\n"                            \
                "       .section __ex_table,\"a\"\n"            \
                "       .balign " RISCV_SZPTR "\n"                      \
index f13f7f276639d504679034a36c53edc15f25dfe1..598568168d3511406fea38b23360c7e28a50a41f 100644 (file)
@@ -4,7 +4,6 @@
 
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_setup.o = -pg
 endif
 
 extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
 obj-y  += cacheinfo.o
 obj-y  += vdso/
 
-CFLAGS_setup.o := -mcmodel=medany
-
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
index 7dd308129b40f1862ab04dc1e12c790bf7c111fe..2872edce894d1e0b79d58a4ed735649dd8261408 100644 (file)
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
 {
        s32 hi20;
 
-       if (IS_ENABLED(CMODEL_MEDLOW)) {
+       if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
index ecb654f6a79ef105931a51950d520c1af845edff..540a331d1376922c62ba17bf0d9c786714d89948 100644 (file)
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
 };
 #endif
 
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
index eb22ab49b3e008ec4ab677778302d5dbbea358b1..b68aac7018031cd5afe4ebb293051cbcc814969e 100644 (file)
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
index b379a75ac6a6778052b9161612357ba5df620648..bc7b77e34d0920f2190c7e8c4edd18658c526703 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+                                                       __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
 static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -117,6 +121,14 @@ void __init setup_bootmem(void)
                         */
                        memblock_reserve(reg->base, vmlinux_end - reg->base);
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+                       /*
+                        * Remove memblock from the end of usable area to the
+                        * end of region
+                        */
+                       if (reg->base + mem_size < end)
+                               memblock_remove(reg->base + mem_size,
+                                               end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);
@@ -143,6 +155,11 @@ void __init setup_bootmem(void)
        }
 }
 
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        }
 }
 
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * Following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ *    so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+       "not use absolute addressing."
+#endif
+
 asmlinkage void __init setup_vm(void)
 {
        extern char _start;
index 4cb771ba13fa7fb39da31ac6a8428744c8026d16..5d316fe40480446b9dd5f90fc0bb4f3bba6d3b55 100644 (file)
@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
            INITRD_START < offset + ENTRIES_EXTENDED_MAX)
                offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
 
index 96f9a9151fde02fc6f76633d76d292f47512d364..59c3e91f2cdb6636023eefc4b3a1dd7507f3b2f6 100644 (file)
@@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long mask = -1UL;
+       unsigned int n = 6;
 
-       /*
-        * No arguments for this syscall, there's nothing to do.
-        */
-       if (!n)
-               return;
-
-       BUG_ON(i + n > 6);
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                mask = 0xffffffff;
 #endif
        while (n-- > 0)
-               if (i + n > 0)
-                       args[n] = regs->gprs[2 + i + n] & mask;
-       if (i == 0)
-               args[0] = regs->orig_gpr2 & mask;
+               if (n > 0)
+                       args[n] = regs->gprs[2 + n] & mask;
+
+       args[0] = regs->orig_gpr2 & mask;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
+
        while (n-- > 0)
-               if (i + n > 0)
-                       regs->gprs[2 + i + n] = args[n];
-       if (i == 0)
-               regs->orig_gpr2 = args[0];
+               if (n > 0)
+                       regs->gprs[2 + n] = args[n];
+       regs->orig_gpr2 = args[0];
 }
 
 static inline int syscall_get_arch(void)
index 594464f2129d4706fc4786d2de55d7c73974c97c..0da378e2eb25edcfee1f787b50eb900947b2ffc4 100644 (file)
@@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 
        if (flags & KERNEL_FPC)
                /* Save floating point control */
-               asm volatile("stfpc %0" : "=m" (state->fpc));
+               asm volatile("stfpc %0" : "=Q" (state->fpc));
 
        if (!MACHINE_HAS_VX) {
                if (flags & KERNEL_VXR_V0V7) {
index a69a0911ed0e82720b10b124d0153681f2c821ea..c475ca49cfc6b43c02ab924e218e541a92b677b8 100644 (file)
@@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
 {
        u64 timer;
 
-       asm volatile("stpt %0" : "=m" (timer));
+       asm volatile("stpt %0" : "=Q" (timer));
        return timer;
 }
 
@@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value imm. afterwards */
-               : "=m" (timer) : "m" (expires));
+               : "=Q" (timer) : "Q" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
 }
@@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
 #else
                "       stck    %1"     /* Store current tod clock value */
 #endif
-               : "=m" (S390_lowcore.last_update_timer),
-                 "=m" (S390_lowcore.last_update_clock));
+               : "=Q" (S390_lowcore.last_update_timer),
+                 "=Q" (S390_lowcore.last_update_clock));
        clock = S390_lowcore.last_update_clock - clock;
        timer -= S390_lowcore.last_update_timer;
 
index 958f46da3a7912cfd94a7b517e8e88b88fec3a5a..d91065e81a4e5cffcb2b86463c8dba9190c0d7fa 100644 (file)
@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
index 6e118799831c32dc37b8cf21960d284c5bec3646..8c9d7e5e5dcc02375eeafab25e47878b76239aaa 100644 (file)
@@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       /*
-        * Do this simply for now. If we need to start supporting
-        * fetching arguments from arbitrary indices, this will need some
-        * extra logic. Presently there are no in-tree users that depend
-        * on this behaviour.
-        */
-       BUG_ON(i);
 
        /* Argument pattern is: R4, R5, R6, R7, R0, R1 */
-       switch (n) {
-       case 6: args[5] = regs->regs[1];
-       case 5: args[4] = regs->regs[0];
-       case 4: args[3] = regs->regs[7];
-       case 3: args[2] = regs->regs[6];
-       case 2: args[1] = regs->regs[5];
-       case 1: args[0] = regs->regs[4];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->regs[1];
+       args[4] = regs->regs[0];
+       args[3] = regs->regs[7];
+       args[2] = regs->regs[6];
+       args[1] = regs->regs[5];
+       args[0] = regs->regs[4];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       /* Same note as above applies */
-       BUG_ON(i);
-
-       switch (n) {
-       case 6: regs->regs[1] = args[5];
-       case 5: regs->regs[0] = args[4];
-       case 4: regs->regs[7] = args[3];
-       case 3: regs->regs[6] = args[2];
-       case 2: regs->regs[5] = args[1];
-       case 1: regs->regs[4] = args[0];
-               break;
-       default:
-               BUG();
-       }
+       regs->regs[1] = args[5];
+       regs->regs[0] = args[4];
+       regs->regs[7] = args[3];
+       regs->regs[6] = args[2];
+       regs->regs[5] = args[1];
+       regs->regs[4] = args[0];
 }
 
 static inline int syscall_get_arch(void)
index 43882580c7f99bec93e519f1b4182c1daad2fbf0..22fad97da06619a137f6f4cd3e3ca4f6a9bddfcc 100644 (file)
@@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 053989e3f6a6f1435323873ea010723ac09736bd..4d075434e8164c18e140249d65cbffdb28290dc6 100644 (file)
@@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        int zero_extend = 0;
        unsigned int j;
+       unsigned int n = 6;
 
 #ifdef CONFIG_SPARC64
        if (test_tsk_thread_flag(task, TIF_32BIT))
@@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
 #endif
 
        for (j = 0; j < n; j++) {
-               unsigned long val = regs->u_regs[UREG_I0 + i + j];
+               unsigned long val = regs->u_regs[UREG_I0 + j];
 
                if (zero_extend)
                        args[j] = (u32) val;
@@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       unsigned int j;
+       unsigned int i;
 
-       for (j = 0; j < n; j++)
-               regs->u_regs[UREG_I0 + i + j] = args[j];
+       for (i = 0; i < 6; i++)
+               regs->u_regs[UREG_I0 + i] = args[i];
 }
 
 static inline int syscall_get_arch(void)
index a8af6023c1263f7b43a4a52f52089235493923bd..14b93c5564e3572c07993c74217fb0b89ee36573 100644 (file)
@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
        p->npages       = 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+       return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled.  */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
                prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
        while (npages != 0) {
-               if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
+               if (!iommu_use_atu(pbm->iommu, mask)) {
                        num = pci_sun4v_iommu_map(devhandle,
                                                  HV_PCI_TSBID(0, entry),
                                                  npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        unsigned long flags, order, first_page, npages, n;
        unsigned long prot = 0;
        struct iommu *iommu;
-       struct atu *atu;
        struct iommu_map_table *tbl;
        struct page *page;
        void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
        iommu = dev->archdata.iommu;
-       atu = iommu->atu;
-
        mask = dev->coherent_dma_mask;
-       if (mask <= DMA_BIT_MASK(32) || !atu)
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
-               tbl = &atu->tbl;
+               tbl = &iommu->atu->tbl;
 
        entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
        atu = iommu->atu;
        devhandle = pbm->devhandle;
 
-       if (dvma <= DMA_BIT_MASK(32)) {
+       if (!iommu_use_atu(iommu, dvma)) {
                tbl = &iommu->tbl;
                iotsb_num = 0; /* we don't care for legacy iommu */
        } else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages >>= IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
index 9fb9cf8cd39a3b29f45a80d1a4281d2abd82a262..98e50c50c12efb65ee100eecff49ea6a54e74853 100644 (file)
@@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        const struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG1(r);
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG2(r);
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG3(r);
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG4(r);
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG5(r);
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG6(r);
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       *args++ = UPT_SYSCALL_ARG1(r);
+       *args++ = UPT_SYSCALL_ARG2(r);
+       *args++ = UPT_SYSCALL_ARG3(r);
+       *args++ = UPT_SYSCALL_ARG4(r);
+       *args++ = UPT_SYSCALL_ARG5(r);
+       *args   = UPT_SYSCALL_ARG6(r);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG1(r) = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG2(r) = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG3(r) = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG4(r) = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG5(r) = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG6(r) = *args++;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       UPT_SYSCALL_ARG1(r) = *args++;
+       UPT_SYSCALL_ARG2(r) = *args++;
+       UPT_SYSCALL_ARG3(r) = *args++;
+       UPT_SYSCALL_ARG4(r) = *args++;
+       UPT_SYSCALL_ARG5(r) = *args++;
+       UPT_SYSCALL_ARG6(r) = *args;
 }
 
 /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
index 5ad92419be19c5d67fae37158bac2b93e105d1bc..62fc3fda1a058eed944d0a37dfe8565b80ac0fd8 100644 (file)
@@ -1499,7 +1499,7 @@ config X86_CPA_STATISTICS
        depends on DEBUG_FS
        ---help---
          Expose statistics about the Change Page Attribute mechanims, which
-         helps to determine the effectivness of preserving large and huge
+         helps to determine the effectiveness of preserving large and huge
          page mappings when mapping protections are changed.
 
 config ARCH_HAS_MEM_ENCRYPT
index 3b6e70d085da89775317c8e2a560625ab4799e01..8457cdd47f751167a2321ebf063eb18bdb4ef8aa 100644 (file)
@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
        vpaddq          t2,t1,t1
        vmovq           t1x,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
index e6add74d78a595b63789d419100b7c30b024e0fc..6f0be7a869641c92c4993e378b53c07a7d385f29 100644 (file)
@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
@@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
        paddq           t2,t1
        movq            t1,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
index 7d2d7c801dba6abb226b630104d1f038242562cf..d45f3fbd232ea7c34ccf1ceda5f97ce8fefd48b8 100644 (file)
@@ -3,10 +3,14 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <asm/apicdef.h>
+#include <asm/nmi.h>
 
 #include "../perf_event.h"
 
+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -113,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
 };
 
 /*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
  */
 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
-  [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
-  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
-  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]       = 0x00d1, /* "Dispatch stalls" event */
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
 {
+       if (boot_cpu_data.x86 >= 0x17)
+               return amd_f17h_perfmon_event_map[hw_event];
+
        return amd_perfmon_event_map[hw_event];
 }
 
@@ -429,6 +450,132 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+/*
+ * When a PMC counter overflows, an NMI is used to process the event and
+ * reset the counter. NMI latency can result in the counter being updated
+ * before the NMI can run, which can result in what appear to be spurious
+ * NMIs. This function is intended to wait for the NMI to run and reset
+ * the counter to avoid possible unhandled NMI messages.
+ */
+#define OVERFLOW_WAIT_COUNT    50
+
+static void amd_pmu_wait_on_overflow(int idx)
+{
+       unsigned int i;
+       u64 counter;
+
+       /*
+        * Wait for the counter to be reset if it has overflowed. This loop
+        * should exit very, very quickly, but just in case, don't wait
+        * forever...
+        */
+       for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
+               rdmsrl(x86_pmu_event_addr(idx), counter);
+               if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+                       break;
+
+               /* Might be in IRQ context, so can't sleep */
+               udelay(1);
+       }
+}
+
+static void amd_pmu_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int idx;
+
+       x86_pmu_disable_all();
+
+       /*
+        * This shouldn't be called from NMI context, but add a safeguard here
+        * to return, since if we're in NMI context we can't wait for an NMI
+        * to reset an overflowed counter value.
+        */
+       if (in_nmi())
+               return;
+
+       /*
+        * Check each counter for overflow and wait for it to be reset by the
+        * NMI if it has overflowed. This relies on the fact that all active
+        * counters are always enabled when this function is called and
+        * ARCH_PERFMON_EVENTSEL_INT is always set.
+        */
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               amd_pmu_wait_on_overflow(idx);
+       }
+}
+
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+       x86_pmu_disable_event(event);
+
+       /*
+        * This can be called from NMI context (via x86_pmu_stop). The counter
+        * may have overflowed, but either way, we'll never see it get reset
+        * by the NMI if we're already in the NMI. And the NMI latency support
+        * below will take care of any pending NMI that might have been
+        * generated by the overflow.
+        */
+       if (in_nmi())
+               return;
+
+       amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
+/*
+ * Because of NMI latency, if multiple PMC counters are active or other sources
+ * of NMIs are received, the perf NMI handler can handle one or more overflowed
+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
+ * back-to-back NMI support won't be active. This PMC handler needs to take into
+ * account that this can occur, otherwise this could result in unknown NMI
+ * messages being issued. Examples of this are PMC overflow while in the NMI
+ * handler when multiple PMCs are active or PMC overflow while handling some
+ * other source of an NMI.
+ *
+ * Attempt to mitigate this by using the number of active PMCs to determine
+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
+ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
+ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ */
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int active, handled;
+
+       /*
+        * Obtain the active count before calling x86_pmu_handle_irq() since
+        * it is possible that x86_pmu_handle_irq() may make a counter
+        * inactive (through x86_pmu_stop).
+        */
+       active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+
+       /* Process any counter overflows */
+       handled = x86_pmu_handle_irq(regs);
+
+       /*
+        * If a counter was handled, record the number of possible remaining
+        * NMIs that can occur.
+        */
+       if (handled) {
+               this_cpu_write(perf_nmi_counter,
+                              min_t(unsigned int, 2, active));
+
+               return handled;
+       }
+
+       if (!this_cpu_read(perf_nmi_counter))
+               return NMI_DONE;
+
+       this_cpu_dec(perf_nmi_counter);
+
+       return NMI_HANDLED;
+}
+
 static struct event_constraint *
 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
@@ -621,11 +768,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 
 static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
-       .handle_irq             = x86_pmu_handle_irq,
-       .disable_all            = x86_pmu_disable_all,
+       .handle_irq             = amd_pmu_handle_irq,
+       .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
-       .disable                = x86_pmu_disable_event,
+       .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
@@ -732,7 +879,7 @@ void amd_pmu_enable_virt(void)
        cpuc->perf_ctr_virt_mask = 0;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@@ -750,7 +897,7 @@ void amd_pmu_disable_virt(void)
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
index e2b1447192a888ffafb2883ddbdfbbd37c1e9315..81911e11a15dfcd7cff5694d0a2a83df769a655b 100644 (file)
@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
-       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+       if (test_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
+               __clear_bit(hwc->idx, cpuc->active_mask);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask)) {
-                       /*
-                        * Though we deactivated the counter some cpus
-                        * might still deliver spurious interrupts still
-                        * in flight. Catch them:
-                        */
-                       if (__test_and_clear_bit(idx, cpuc->running))
-                               handled++;
+               if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               }
 
                event = cpuc->events[idx];
 
index 8baa441d8000f6c4efbde5afe72d4e5a518d2184..f9451566cd9b2c4398d56b18f669a2e0dde8a98c 100644 (file)
@@ -3131,7 +3131,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
                flags &= ~PERF_SAMPLE_TIME;
        if (!event->attr.exclude_kernel)
                flags &= ~PERF_SAMPLE_REGS_USER;
-       if (event->attr.sample_regs_user & ~PEBS_REGS)
+       if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
        return flags;
 }
@@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
-               if (!event->attr.freq) {
+               if (!(event->attr.freq || event->attr.wakeup_events)) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
                              ~intel_pmu_large_pebs_flags(event)))
@@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu)
 
        cpuc->lbr_sel = NULL;
 
+       if (x86_pmu.flags & PMU_FL_TFA) {
+               WARN_ON_ONCE(cpuc->tfa_shadow);
+               cpuc->tfa_shadow = ~0ULL;
+               intel_set_tfa(cpuc, false);
+       }
+
        if (x86_pmu.version > 1)
                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
index a75955741c50422b9894d454c1a33ec7c9790a77..1e98a42b560ad2d9a320b7e3198ff11e3dde3c6d 100644 (file)
@@ -96,25 +96,25 @@ struct amd_nb {
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD)
 
-#define PEBS_REGS \
-       (PERF_REG_X86_AX | \
-        PERF_REG_X86_BX | \
-        PERF_REG_X86_CX | \
-        PERF_REG_X86_DX | \
-        PERF_REG_X86_DI | \
-        PERF_REG_X86_SI | \
-        PERF_REG_X86_SP | \
-        PERF_REG_X86_BP | \
-        PERF_REG_X86_IP | \
-        PERF_REG_X86_FLAGS | \
-        PERF_REG_X86_R8 | \
-        PERF_REG_X86_R9 | \
-        PERF_REG_X86_R10 | \
-        PERF_REG_X86_R11 | \
-        PERF_REG_X86_R12 | \
-        PERF_REG_X86_R13 | \
-        PERF_REG_X86_R14 | \
-        PERF_REG_X86_R15)
+#define PEBS_GP_REGS                   \
+       ((1ULL << PERF_REG_X86_AX)    | \
+        (1ULL << PERF_REG_X86_BX)    | \
+        (1ULL << PERF_REG_X86_CX)    | \
+        (1ULL << PERF_REG_X86_DX)    | \
+        (1ULL << PERF_REG_X86_DI)    | \
+        (1ULL << PERF_REG_X86_SI)    | \
+        (1ULL << PERF_REG_X86_SP)    | \
+        (1ULL << PERF_REG_X86_BP)    | \
+        (1ULL << PERF_REG_X86_IP)    | \
+        (1ULL << PERF_REG_X86_FLAGS) | \
+        (1ULL << PERF_REG_X86_R8)    | \
+        (1ULL << PERF_REG_X86_R9)    | \
+        (1ULL << PERF_REG_X86_R10)   | \
+        (1ULL << PERF_REG_X86_R11)   | \
+        (1ULL << PERF_REG_X86_R12)   | \
+        (1ULL << PERF_REG_X86_R13)   | \
+        (1ULL << PERF_REG_X86_R14)   | \
+        (1ULL << PERF_REG_X86_R15))
 
 /*
  * Per register state.
index d153d570bb04755d9fb106e3375db55dd3114fd7..8e790ec219a5fd5be0e812736ff7be167a5cd20e 100644 (file)
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#define RLONG_ADDR(x)                   "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x)                  "+m" (*(volatile char *) (x))
 
-#define ADDR                           BITOP_ADDR(addr)
+#define ADDR                           RLONG_ADDR(addr)
 
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)      WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)                 (1 << ((nr) & 7))
 
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-                       : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
        bool negative;
        asm volatile(LOCK_PREFIX "andb %2,%1"
                CC_SET(s)
-               : CC_OUT(s) (negative), ADDR
+               : CC_OUT(s) (negative), WBYTE_ADDR(addr)
                : "ir" ((char) ~(1 << nr)) : "memory");
        return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
  * __clear_bit() is non-atomic and implies release semantics before the memory
  * operation. It can be used for an unlock if no other CPUs can concurrently
  * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
  */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-       barrier();
        __clear_bit(nr, addr);
 }
 
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 
        asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
-           : CC_OUT(c) (oldbit), ADDR
-           : "Ir" (nr));
+           : CC_OUT(c) (oldbit)
+           : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 
        asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr));
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
        asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr) : "memory");
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
 
        return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
        asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
-                    : "m" (*(unsigned long *)addr), "Ir" (nr));
+                    : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
        return oldbit;
 }
index 93c4bf598fb06c7e53865141dd3e7faa514194ff..feab24cac610e25f276d3d1f71f4705c23106b00 100644 (file)
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate);
+       void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
 
index 159b5988292f33ec2d1a079bf7d10ba2bc999d4b..a9d03af340307db6589376cf3bfb29a533910cdd 100644 (file)
@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -844,9 +844,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-       unsigned int n_used_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_max_mmu_pages;
+       unsigned long n_used_mmu_pages;
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset)         \
+       (*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
index d653139857af2a1121f877b611c8d56d4e4690f0..4c305471ec3312e3b0adc7063c30e9d3edf2de7f 100644 (file)
@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+       memcpy(args, &regs->bx, 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->bx;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->cx;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->bp;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               *args++ = regs->bx;
+               *args++ = regs->cx;
+               *args++ = regs->dx;
+               *args++ = regs->si;
+               *args++ = regs->di;
+               *args   = regs->bp;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->r10;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->r8;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->r9;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               *args++ = regs->di;
+               *args++ = regs->si;
+               *args++ = regs->dx;
+               *args++ = regs->r10;
+               *args++ = regs->r8;
+               *args   = regs->r9;
+       }
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->bx = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->cx = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->bp = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               regs->bx = *args++;
+               regs->cx = *args++;
+               regs->dx = *args++;
+               regs->si = *args++;
+               regs->di = *args++;
+               regs->bp = *args;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->r10 = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->r8 = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->r9 = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               regs->di = *args++;
+               regs->si = *args++;
+               regs->dx = *args++;
+               regs->r10 = *args++;
+               regs->r8 = *args++;
+               regs->r9 = *args;
+       }
 }
 
 static inline int syscall_get_arch(void)
index de6f0d59a24f418febf72e40dd595e41dcb3c7c0..2863c2026655815c2237a939d66e390e3a7623bf 100644 (file)
@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+       if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+               return -EINVAL;
+
        asm volatile(CALL_NOSPEC
                     : __HYPERCALL_5PARAM
                     : [thunk_target] "a" (&hypercall_page[call])
index f0b0c90dd398246eb2882050d69c6b53ccca11af..d213ec5c3766db0dd5176c951b13e5f3c1514cfb 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
+#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index 2da82eff0eb4f8498c8cdd65bd9f9dd5fa1fa6eb..b91b3bfa5cfbef1fb0cb8cbfa4011aa404e6c073 100644 (file)
@@ -275,7 +275,7 @@ static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
        { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
        { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
        { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
@@ -419,7 +419,7 @@ static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
@@ -658,7 +658,7 @@ static const char * const ssb_strings[] = {
 static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[]  __initdata = {
+} ssb_mitigation_options[]  __initconst = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
index fc3c07fe7df58a22c01c8c1180d0b394bde8b59a..3142fd7a9b32201fe34f9933232127c89c09c017 100644 (file)
@@ -611,8 +611,8 @@ static void init_intel_energy_perf(struct cpuinfo_x86 *c)
        if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
                return;
 
-       pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
-       pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
+       pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+       pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
 }
index 399601eda8e43c2cf8a855b44b4dc811a247c9e5..85212a32b54df8be06365eeaf4ef399996a3abff 100644 (file)
@@ -2039,14 +2039,14 @@ out:
 enum rdt_param {
        Opt_cdp,
        Opt_cdpl2,
-       Opt_mba_mpbs,
+       Opt_mba_mbps,
        nr__rdt_params
 };
 
 static const struct fs_parameter_spec rdt_param_specs[] = {
        fsparam_flag("cdp",             Opt_cdp),
        fsparam_flag("cdpl2",           Opt_cdpl2),
-       fsparam_flag("mba_mpbs",        Opt_mba_mpbs),
+       fsparam_flag("mba_MBps",        Opt_mba_mbps),
        {}
 };
 
@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_cdpl2:
                ctx->enable_cdpl2 = true;
                return 0;
-       case Opt_mba_mpbs:
+       case Opt_mba_mbps:
                if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                        return -EINVAL;
                ctx->enable_mba_mbps = true;
@@ -2610,9 +2610,10 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
                }
-               rdtgrp->mode = RDT_MODE_SHAREABLE;
        }
 
+       rdtgrp->mode = RDT_MODE_SHAREABLE;
+
        return 0;
 }
 
index a034cb808e7eb482e6fd8eae3fac9afca63b429c..fed46ddb1eef2d3de307f1cbb899f45c4b3e67c2 100644 (file)
@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
        unsigned long *sara = stack_addr(regs);
 
        ri->ret_addr = (kprobe_opcode_t *) *sara;
+       ri->fp = sara;
 
        /* Replace the return addr with trampoline addr */
        *sara = (unsigned long) &kretprobe_trampoline;
@@ -748,26 +749,48 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+static struct kprobe kretprobe_kprobe = {
+       .addr = (void *)kretprobe_trampoline,
+};
+
 /*
  * Called from kretprobe_trampoline
  */
 static __used void *trampoline_handler(struct pt_regs *regs)
 {
+       struct kprobe_ctlblk *kcb;
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;
+       void *frame_pointer;
+       bool skipped = false;
+
+       preempt_disable();
+
+       /*
+        * Set a dummy kprobe to avoid kretprobe recursion.
+        * Since a kretprobe never runs in a kprobe handler, no kprobe
+        * can be running at this point.
+        */
+       kcb = get_kprobe_ctlblk();
+       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
        /* fixup registers */
 #ifdef CONFIG_X86_64
        regs->cs = __KERNEL_CS;
+       /* On x86-64, we use pt_regs->sp for return address holder. */
+       frame_pointer = &regs->sp;
 #else
        regs->cs = __KERNEL_CS | get_kernel_rpl();
        regs->gs = 0;
+       /* On x86-32, we use pt_regs->flags for return address holder. */
+       frame_pointer = &regs->flags;
 #endif
        regs->ip = trampoline_address;
        regs->orig_ax = ~0UL;
@@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               /*
+                * Return probes must be pushed on this hash list in the
+                * correct order (same as return order) so that they can be
+                * popped correctly. However, if we find one pushed in an
+                * incorrect order, it means we found a function which should
+                * not have been probed, because the wrong-order entry was
+                * pushed on the path of processing another kretprobe itself.
+                */
+               if (ri->fp != frame_pointer) {
+                       if (!skipped)
+                               pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+                       skipped = true;
+                       continue;
+               }
 
                orig_ret_address = (unsigned long)ri->ret_addr;
+               if (skipped)
+                       pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+                               ri->rp->kp.addr);
 
                if (orig_ret_address != trampoline_address)
                        /*
@@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               if (ri->fp != frame_pointer)
+                       continue;
 
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
-                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, NULL);
+                       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
 
        kretprobe_hash_unlock(current, &flags);
 
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
index 58ac7be52c7a6df944dca7305492b8ce70ed8d8e..957eae13b37008339b6dfaad350ea9468fccb760 100644 (file)
@@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
        u64 msr = x86_spec_ctrl_base;
        bool updmsr = false;
 
+       lockdep_assert_irqs_disabled();
+
        /*
         * If TIF_SSBD is different, select the proper mitigation
         * method. Note that if SSBD mitigation is disabled or permanently
@@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 
 void speculation_ctrl_update(unsigned long tif)
 {
+       unsigned long flags;
+
        /* Forced update. Make sure all relevant TIF flags are different */
-       preempt_disable();
+       local_irq_save(flags);
        __speculation_ctrl_update(~tif, tif);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
index 725624b6c0c05cdc0c94175214a7ce796df47eee..8fd3cedd9accdd1c17757e5a381b2ab1eac1c032 100644 (file)
@@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+               reboot_type = BOOT_EFI;
+               pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 void __noreturn machine_real_restart(unsigned int type)
 {
        local_irq_disable();
@@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
                },
        },
+       {       /* Handle reboot issue on Acer TravelMate X514-51T */
+               .callback = set_efi_reboot,
+               .ident = "Acer TravelMate X514-51T",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+               },
+       },
 
        /* Apple */
        {       /* Handle problems with rebooting on Apple MacBook5 */
index bad8c51fee6eea6be91d7a594e820470c121c2a9..a5127b2c195f9df3031e1df660764bc1624078f2 100644 (file)
@@ -362,7 +362,7 @@ SECTIONS
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
-               *(.bss)
+               *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
index c338984c850d28a1213e46f86efc06d425115660..d0d5dd44b4f478524cc959cefb245695d9e40894 100644 (file)
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
-#define GET_SMSTATE(type, smbase, offset)                                \
-       ({                                                                \
-        type __val;                                                      \
-        int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
-                                     sizeof(__val));                     \
-        if (r != X86EMUL_CONTINUE)                                       \
-                return X86EMUL_UNHANDLEABLE;                             \
-        __val;                                                           \
-       })
-
 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
 {
        desc->g    = (flags >> 23) & 1;
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
        desc->type = (flags >>  8) & 15;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
        u16 selector;
 
-       selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+       selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
 
        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;
 
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 
        offset = 0x7e00 + n * 16;
 
-       selector =                GET_SMSTATE(u16, smbase, offset);
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
+       selector =                GET_SMSTATE(u16, smstate, offset);
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
 
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u32 val, cr0, cr3, cr4;
        int i;
 
-       cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
-       ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-       ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
+       cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
+       cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
+       ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+       ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
 
        for (i = 0; i < 8; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+               *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
 
-       val = GET_SMSTATE(u32, smbase, 0x7fcc);
+       val = GET_SMSTATE(u32, smstate, 0x7fcc);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7fc8);
+       val = GET_SMSTATE(u32, smstate, 0x7fc8);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
        ctxt->ops->set_idt(ctxt, &dt);
 
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_32(ctxt, smbase, i);
+               int r = rsm_load_seg_32(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+       cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
 
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
 
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        int i, r;
 
        for (i = 0; i < 16; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+               *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
 
-       ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
-       ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+       ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
+       ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
 
-       val = GET_SMSTATE(u32, smbase, 0x7f68);
+       val = GET_SMSTATE(u32, smstate, 0x7f68);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7f60);
+       val = GET_SMSTATE(u32, smstate, 0x7f60);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
-       cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
-       val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
+       cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
+       cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
+       cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+       val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
        ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
        ctxt->ops->set_idt(ctxt, &dt);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
        r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
                return r;
 
        for (i = 0; i < 6; i++) {
-               r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
        unsigned long cr0, cr4, efer;
+       char buf[512];
        u64 smbase;
        int ret;
 
        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
                return emulate_ud(ctxt);
 
+       smbase = ctxt->ops->get_smbase(ctxt);
+
+       ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+       if (ret != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+
+       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+               ctxt->ops->set_nmi_mask(ctxt, false);
+
+       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
        /*
         * Get back to real mode, to prepare a safe state in which to load
         * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
         * supports long mode.
         */
-       cr4 = ctxt->ops->get_cr(ctxt, 4);
        if (emulator_has_longmode(ctxt)) {
                struct desc_struct cs_desc;
 
                /* Zero CR4.PCIDE before CR0.PG.  */
-               if (cr4 & X86_CR4_PCIDE) {
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PCIDE)
                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-                       cr4 &= ~X86_CR4_PCIDE;
-               }
 
                /* A 32-bit code segment is required to clear EFER.LMA.  */
                memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (cr0 & X86_CR0_PE)
                ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-       /* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-       if (cr4 & X86_CR4_PAE)
-               ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-
-       /* And finally go back to 32-bit mode.  */
-       efer = 0;
-       ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       if (emulator_has_longmode(ctxt)) {
+               /* Clear CR4.PAE before clearing EFER.LME. */
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PAE)
+                       ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-       smbase = ctxt->ops->get_smbase(ctxt);
+               /* And finally go back to 32-bit mode.  */
+               efer = 0;
+               ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       }
 
        /*
         * Give pre_leave_smm() a chance to make ISA-specific changes to the
         * vCPU state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+       if (ctxt->ops->pre_leave_smm(ctxt, buf))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
-               ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+               ret = rsm_load_state_64(ctxt, buf);
        else
-               ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+               ret = rsm_load_state_32(ctxt, buf);
 
        if (ret != X86EMUL_CONTINUE) {
                /* FIXME: should triple fault */
                return X86EMUL_UNHANDLEABLE;
        }
 
-       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-               ctxt->ops->set_nmi_mask(ctxt, false);
+       ctxt->ops->post_leave_smm(ctxt);
 
-       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
        return X86EMUL_CONTINUE;
 }
 
index 991fdf7fc17fbd9e1a4cab99d688a7af820d397c..9bf70cf845648f5e66143440166d57c5fd287bf9 100644 (file)
@@ -138,6 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 
+                       offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
@@ -901,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
-                       *dst = &map->phys_map[irq->dest_id];
+                       u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+                       *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
index eee455a8a612d00a516bfe892a690bcd8bc91e39..e10962dfc2032d982f124070b88f7d625d2b8f0b 100644 (file)
@@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2238,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
                                        struct list_head *invalid_list,
                                        bool remote_flush)
 {
-       if (!remote_flush && !list_empty(invalid_list))
+       if (!remote_flush && list_empty(invalid_list))
                return false;
 
        if (!list_empty(invalid_list))
@@ -2763,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
 
@@ -6031,10 +6031,10 @@ out:
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
-       unsigned int nr_mmu_pages;
-       unsigned int  nr_pages = 0;
+       unsigned long nr_mmu_pages;
+       unsigned long nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int i;
@@ -6047,8 +6047,7 @@ unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
        }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-       nr_mmu_pages = max(nr_mmu_pages,
-                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+       nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
index bbdc60f2fae89beb34c72716d9e7eb9c33584651..54c2a377795be6920bee9676e58555110c3a56b9 100644 (file)
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
index 58ead7db71a312764b56d9f242e84820239eeb93..e39741997893a977fdda077ff637bf465fbb1748 100644 (file)
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
        bool fast_mode = idx & (1u << 31);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 ctr_val;
 
+       if (!pmu->version)
+               return 1;
+
        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
index 426039285fd1fc5b10796c79278dd784af788545..406b558abfef7379eb46bd2de18e5d6890079eb9 100644 (file)
@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
@@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
 
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)
 
        if (svm->nmi_singlestep) {
                disable_nmi_singlestep(svm);
+               /* Make sure we check for pending NMIs upon entry */
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+               int i;
+               struct kvm_vcpu *vcpu;
+               struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
                /*
-                * Update ICR high and low, then emulate sending IPI,
-                * which is handled when writing APIC_ICR.
+                * At this point, we expect that the AVIC HW has already
+                * set the appropriate IRR bits on the valid target
+                * vcpus. So, we just need to kick the appropriate vcpu.
                 */
-               kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-               kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       bool m = kvm_apic_match_dest(vcpu, apic,
+                                                    icrl & KVM_APIC_SHORT_MASK,
+                                                    GET_APIC_DEST_FIELD(icrh),
+                                                    icrl & KVM_APIC_DEST_MASK);
+
+                       if (m && !avic_vcpu_is_running(vcpu))
+                               kvm_vcpu_wake_up(vcpu);
+               }
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
 
        if (entry)
-               WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK);
+               clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
 }
 
 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
@@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
        clgi();
+       kvm_load_guest_xcr0(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
 
+       kvm_put_guest_xcr0(vcpu);
        stgi();
 
        /* Any pending NMI will happen here */
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *nested_vmcb;
        struct page *page;
-       struct {
-               u64 guest;
-               u64 vmcb;
-       } svm_state_save;
-       int ret;
+       u64 guest;
+       u64 vmcb;
 
-       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-                                 sizeof(svm_state_save));
-       if (ret)
-               return ret;
+       guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+       vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-       if (svm_state_save.guest) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
-               nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
-               if (nested_vmcb)
-                       enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
-               else
-                       ret = 1;
-               vcpu->arch.hflags |= HF_SMM_MASK;
+       if (guest) {
+               nested_vmcb = nested_svm_map(svm, vmcb, &page);
+               if (!nested_vmcb)
+                       return 1;
+               enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
        }
-       return ret;
+       return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
@@ -6422,11 +6431,11 @@ e_free:
        return ret;
 }
 
-static int get_num_contig_pages(int idx, struct page **inpages,
-                               unsigned long npages)
+static unsigned long get_num_contig_pages(unsigned long idx,
+                               struct page **inpages, unsigned long npages)
 {
        unsigned long paddr, next_paddr;
-       int i = idx + 1, pages = 1;
+       unsigned long i = idx + 1, pages = 1;
 
        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
@@ -6445,12 +6454,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
 
 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
-       unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+       unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
-       int i, ret, pages;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6799,7 +6808,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
-       int ret, size;
+       unsigned int size;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6807,6 +6817,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;
 
+       if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+               return -EINVAL;
+       if (!debug.dst_uaddr)
+               return -EINVAL;
+
        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
@@ -6857,8 +6872,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
                                                     dst_vaddr,
                                                     len, &argp->error);
 
-               sev_unpin_memory(kvm, src_p, 1);
-               sev_unpin_memory(kvm, dst_p, 1);
+               sev_unpin_memory(kvm, src_p, n);
+               sev_unpin_memory(kvm, dst_p, n);
 
                if (ret)
                        goto err;
index 6432d08c7de79ccbde654b7ab17c9649b75a25c2..4d47a2631d1fb46d9f913b59743cb5417d7401c6 100644 (file)
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-           TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+           TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
            TP_ARGS(apicid, dm, tm, vec),
 
        TP_STRUCT__entry(
                __field(        __u32,          apicid          )
                __field(        __u16,          dm              )
-               __field(        __u8,           tm              )
+               __field(        __u16,          tm              )
                __field(        __u8,           vec             )
        ),
 
index 153e539c29c92fcb3c55c3737ec7e0533c13e1e2..6401eb7ef19ce0e9f9258b617e001dfdac534a2a 100644 (file)
@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
        }
 }
 
+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
+       int msr;
+
+       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+               unsigned word = msr / BITS_PER_LONG;
+
+               msr_bitmap[word] = ~0;
+               msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+       }
+}
+
 /*
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                return false;
 
        msr_bitmap_l1 = (unsigned long *)kmap(page);
-       if (nested_cpu_has_apic_reg_virt(vmcs12)) {
-               /*
-                * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
-                * just lets the processor take the value from the virtual-APIC page;
-                * take those 256 bits directly from the L1 bitmap.
-                */
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = msr_bitmap_l1[word];
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       } else {
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = ~0;
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       }
 
-       nested_vmx_disable_intercept_for_msr(
-               msr_bitmap_l1, msr_bitmap_l0,
-               X2APIC_MSR(APIC_TASKPRI),
-               MSR_TYPE_W);
+       /*
+        * To keep the control flow simple, pay eight 8-byte writes (sixteen
+        * 4-byte writes on 32-bit systems) up front to enable intercepts for
+        * the x2APIC MSR range and selectively disable them below.
+        */
+       enable_x2apic_msr_intercepts(msr_bitmap_l0);
+
+       if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+               if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+                       /*
+                        * L0 need not intercept reads for MSRs between 0x800
+                        * and 0x8ff, it just lets the processor take the value
+                        * from the virtual-APIC page; take those 256 bits
+                        * directly from the L1 bitmap.
+                        */
+                       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+                               unsigned word = msr / BITS_PER_LONG;
+
+                               msr_bitmap_l0[word] = msr_bitmap_l1[word];
+                       }
+               }
 
-       if (nested_cpu_has_vid(vmcs12)) {
-               nested_vmx_disable_intercept_for_msr(
-                       msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_EOI),
-                       MSR_TYPE_W);
                nested_vmx_disable_intercept_for_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_SELF_IPI),
-                       MSR_TYPE_W);
+                       X2APIC_MSR(APIC_TASKPRI),
+                       MSR_TYPE_R | MSR_TYPE_W);
+
+               if (nested_cpu_has_vid(vmcs12)) {
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_EOI),
+                               MSR_TYPE_W);
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_SELF_IPI),
+                               MSR_TYPE_W);
+               }
        }
 
        if (spec_ctrl)
@@ -2857,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                /*
                 * If translation failed, VM entry will fail because
                 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
-                * Failing the vm entry is _not_ what the processor
-                * does but it's basically the only possibility we
-                * have.  We could still enter the guest if CR8 load
-                * exits are enabled, CR8 store exits are enabled, and
-                * virtualize APIC access is disabled; in this case
-                * the processor would never use the TPR shadow and we
-                * could simply clear the bit from the execution
-                * control.  But such a configuration is useless, so
-                * let's keep the code simple.
                 */
                if (!is_error_page(page)) {
                        vmx->nested.virtual_apic_page = page;
                        hpa = page_to_phys(vmx->nested.virtual_apic_page);
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+               } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+                          nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+                          !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                       /*
+                        * The processor will never use the TPR shadow, simply
+                        * clear the bit from the execution control.  Such a
+                        * configuration is useless, but it happens in tests.
+                        * For any other configuration, failing the vm entry is
+                        * _not_ what the processor does but it's basically the
+                        * only possibility we have.
+                        */
+                       vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+                                       CPU_BASED_TPR_SHADOW);
+               } else {
+                       printk("bad virtual-APIC page address\n");
+                       dump_vmcs();
                }
        }
 
@@ -3773,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
        vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
 
        nested_ept_uninit_mmu_context(vcpu);
-       vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+       /*
+        * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+        * points to shadow pages!  Fortunately we only get here after a WARN_ON
+        * if EPT is disabled, so a VMabort is perfectly fine.
+        */
+       if (enable_ept) {
+               vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+               __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+       } else {
+               nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+       }
 
        /*
         * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -5722,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 {
        int i;
 
+       /*
+        * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+        * VMfail, because they are not available in vmcs01.  Just always
+        * use hardware checks.
+        */
+       if (!enable_ept)
+               nested_early_check = 1;
+
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
index ab432a930ae865d0000d8273643de236d0738fb8..b4e7d645275a2153c42fa252cce8a8cbb930b59e 100644 (file)
@@ -5603,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
               vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }
 
-static void dump_vmcs(void)
+void dump_vmcs(void)
 {
        u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
        u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6410,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
            vcpu->arch.pkru != vmx->host_pkru)
@@ -6506,6 +6508,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                        __write_pkru(vmx->host_pkru);
        }
 
+       kvm_put_guest_xcr0(vcpu);
+
        vmx->nested.nested_run_pending = 0;
        vmx->idt_vectoring_info = 0;
 
@@ -6852,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *entry;
+       union cpuid10_eax eax;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return false;
+
+       eax.full = entry->eax;
+       return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+       if (pmu_enabled)
+               vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+       else
+               vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
+
 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6940,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
                nested_vmx_entry_exit_ctls_update(vcpu);
+               nested_vmx_procbased_ctls_update(vcpu);
        }
 
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7369,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;
@@ -7380,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
        }
 
        if (vmx->nested.smm.guest_mode) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
-               vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
 
index a1e00d0a2482c16b81be561c30a4d10d3233975b..f879529906b48cd84e99cc0f672210aaeaffeabd 100644 (file)
@@ -517,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+void dump_vmcs(void);
+
 #endif /* __KVM_X86_VMX_H */
index 099b851dabafd7e2980f96472209777f9cc8f77b..a0d1fc80ac5a8407c123d8df12eb2215d4d70392 100644 (file)
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -3093,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NESTED_STATE:
                r = kvm_x86_ops->get_nested_state ?
-                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
                break;
        default:
                break;
@@ -3528,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
@@ -3588,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-               u32 hflags = vcpu->arch.hflags;
-               if (events->smi.smm)
-                       hflags |= HF_SMM_MASK;
-               else
-                       hflags &= ~HF_SMM_MASK;
-               kvm_set_hflags(vcpu, hflags);
+               if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+                       if (events->smi.smm)
+                               vcpu->arch.hflags |= HF_SMM_MASK;
+                       else
+                               vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       kvm_smm_changed(vcpu);
+               }
 
                vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4270,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4284,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5958,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+}
+
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+                                 const char *smstate)
+{
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
-       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+       kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -6006,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
        .pre_leave_smm       = emulator_pre_leave_smm,
+       .post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6247,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-       unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-       vcpu->arch.hflags = emul_flags;
-
-       if (changed & HF_SMM_MASK)
-               kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
                                unsigned long *db)
 {
@@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
@@ -7865,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7919,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
index 28406aa1136d7eb772ed712f9df34ffe14290e66..aedc5d0d4989b3fc7422c17e55fc6b65bfef06a3 100644 (file)
@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
        __this_cpu_write(current_vcpu, NULL);
 }
 
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif
index ee8f8ab469417c6eb0f06aa6d4390a0eec8162b1..c0309ea9abee4201b1f697e9e61e20296f359736 100644 (file)
@@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st)
 #endif
        /* Account the WX pages */
        st->wx_pages += npages;
-       WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
+       WARN_ONCE(__supported_pte_mask & _PAGE_NX,
+                 "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
 }
 
index 0029604af8a411397c019f066fae8dee7df8c805..dd73d5d74393f7c987e9c4c18fde1f698d9213ae 100644 (file)
@@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
        pte = early_ioremap_pte(addr);
 
        /* Sanitize 'prot' against any unsupported bits: */
-       pgprot_val(flags) &= __default_kernel_pte_mask;
+       pgprot_val(flags) &= __supported_pte_mask;
 
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
index 3f452ffed7e93f377aa1ae38150a1f2cf7e91a5c..d669c5e797e06e27a891f099739deb21e165673b 100644 (file)
@@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
        if (!kaslr_memory_enabled())
                return;
 
-       kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+       kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
        kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
 
        /*
index bc4bc7b2f075d3f302ba25dc261b759ab89dab97..487b8474c01cde006241a4c9a732bfe6aae53ff6 100644 (file)
@@ -728,7 +728,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
        int cpu;
 
-       struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
+       struct flush_tlb_info info = {
                .mm = mm,
                .stride_shift = stride_shift,
                .freed_tables = freed_tables,
index f7dd895b2353e0510a7199896030c8527695c130..0c14018d1c2601a63a92b2f29be1270d9919220c 100644 (file)
@@ -187,15 +187,18 @@ struct thread_struct {
 
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
-       memset(regs, 0, sizeof(*regs)); \
-       regs->pc = new_pc; \
-       regs->ps = USER_PS_VALUE; \
-       regs->areg[1] = new_sp; \
-       regs->areg[0] = 0; \
-       regs->wmask = 1; \
-       regs->depc = 0; \
-       regs->windowbase = 0; \
-       regs->windowstart = 1;
+       do { \
+               memset((regs), 0, sizeof(*(regs))); \
+               (regs)->pc = (new_pc); \
+               (regs)->ps = USER_PS_VALUE; \
+               (regs)->areg[1] = (new_sp); \
+               (regs)->areg[0] = 0; \
+               (regs)->wmask = 1; \
+               (regs)->depc = 0; \
+               (regs)->windowbase = 0; \
+               (regs)->windowstart = 1; \
+               (regs)->syscall = NO_SYSCALL; \
+       } while (0)
 
 /* Forward declaration */
 struct task_struct;
index a168bf81c7f4701a036abaa251fa5dce7fad6c19..91dc06d580603bfd8025eeda2acaf09bd2260378 100644 (file)
@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
+       unsigned int i;
 
-       if (n == 0)
-               return;
-
-       WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
-
-       for (j = 0; j < n; ++j) {
-               if (i + j < SYSCALL_MAX_ARGS)
-                       args[j] = regs->areg[reg[i + j]];
-               else
-                       args[j] = 0;
-       }
+       for (i = 0; i < 6; ++i)
+               args[i] = regs->areg[reg[i]];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
-
-       if (n == 0)
-               return;
-
-       if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
-               if (i < SYSCALL_MAX_ARGS)
-                       n = SYSCALL_MAX_ARGS - i;
-               else
-                       return;
-       }
+       unsigned int i;
 
-       for (j = 0; j < n; ++j)
-               regs->areg[reg[i + j]] = args[j];
+       for (i = 0; i < 6; ++i)
+               regs->areg[reg[i]] = args[i];
 }
 
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
index e50f5124dc6f789c7457cb3b296d67f2b5cc76e1..e54af8b7e0f8c314830ae1ae5244f86af19a1682 100644 (file)
@@ -1860,6 +1860,8 @@ ENTRY(system_call)
        l32i    a7, a2, PT_SYSCALL
 
 1:
+       s32i    a7, a1, 4
+
        /* syscall = sys_call_table[syscall_nr] */
 
        movi    a4, sys_call_table
@@ -1893,8 +1895,12 @@ ENTRY(system_call)
        retw
 
 1:
+       l32i    a4, a1, 4
+       l32i    a3, a2, PT_SYSCALL
+       s32i    a4, a2, PT_SYSCALL
        mov     a6, a2
        call4   do_syscall_trace_leave
+       s32i    a3, a2, PT_SYSCALL
        retw
 
 ENDPROC(system_call)
index 174c11f13bba375472f77a02eca75b1408d5e2de..b9f82510c65019506ffb98f3f23ac494f7285efa 100644 (file)
@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
        return 1;
 }
 
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
 unsigned long return_address(unsigned level)
 {
        struct return_addr_data r = {
-               .skip = level + 1,
+               .skip = level,
        };
        walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
        return r.addr;
index 2fb7d117222840da05f44cf7eed39348d27502e5..03678c4afc39b9e4ee94a3666b7ef630ad246d05 100644 (file)
@@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 
        pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
        if (!pte)
-               panic("%s: Failed to allocate %zu bytes align=%lx\n",
+               panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
 
        for (i = 0; i < n_pages; ++i)
index 4c592496a16a21655dcd8616ae4b3181111b3956..5ba1e0d841b4d552e3858ad30888e489669772bd 100644 (file)
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
         * at least two nodes.
         */
        return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
               || bfqd->num_groups_with_pending_reqs > 0
 #endif
                );
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
        bfq_remove_request(q, rq);
 }
 
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        /*
         * If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
        /*
         * All in-service entities must have been properly deactivated
         * or requeued before executing the next function, which
-        * resets all in-service entites as no more in service.
+        * resets all in-service entities as no more in service. This
+        * may cause bfqq to be freed. If this happens, the next
+        * function returns true.
         */
-       __bfq_bfqd_reset_in_service(bfqd);
+       return __bfq_bfqd_reset_in_service(bfqd);
 }
 
 /**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
        bool slow;
        unsigned long delta = 0;
        struct bfq_entity *entity = &bfqq->entity;
-       int ref;
 
        /*
         * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
         * reason.
         */
        __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-       ref = bfqq->ref;
-       __bfq_bfqq_expire(bfqd, bfqq);
-
-       if (ref == 1) /* bfqq is gone, no more actions on it */
+       if (__bfq_bfqq_expire(bfqd, bfqq))
+               /* bfqq is gone, no more actions on it */
                return;
 
        bfqq->injected_service = 0;
@@ -5397,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
        return min_shallow;
 }
 
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5405,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
 
        min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+       bfq_depth_updated(hctx);
        return 0;
 }
 
@@ -5827,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = {
                .requests_merged        = bfq_requests_merged,
                .request_merged         = bfq_request_merged,
                .has_work               = bfq_has_work,
+               .depth_updated          = bfq_depth_updated,
                .init_hctx              = bfq_init_hctx,
                .init_sched             = bfq_init_queue,
                .exit_sched             = bfq_exit_queue,
index 062e1c4787f4a9e66ac4df54d24c17d9f92c6577..86394e503ca9c0487a66d40deaa09643148ae3df 100644 (file)
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
                             bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                         bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
index 63311d1ff1edf41823ef2790ac3175535d2f2ef5..ae4d000ac0af1c38a49c28e824ca843bfb3c531d 100644 (file)
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                entity->on_st = true;
        }
 
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
        if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
                struct bfq_group *bfqg =
                        container_of(entity, struct bfq_group, entity);
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
        return bfqq;
 }
 
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
        struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
        struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
         * service tree either, then release the service reference to
         * the queue it represents (taken with bfq_get_entity).
         */
-       if (!in_serv_entity->on_st)
+       if (!in_serv_entity->on_st) {
+               /*
+                * If no process is referencing in_serv_bfqq any
+                * longer, then the service reference may be the only
+                * reference to the queue. If this is the case, then
+                * bfqq gets freed here.
+                */
+               int ref = in_serv_bfqq->ref;
                bfq_put_queue(in_serv_bfqq);
+               if (ref == 1)
+                       return true;
+       }
+
+       return false;
 }
 
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
index b64cedc7f87cf1cf5f24bf4c50c808ae6a59f210..716510ecd7ffa3f0535b49e3a443ea1ebe38ec12 100644 (file)
@@ -1298,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                        }
                }
 
-               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+                       if (!map_data)
+                               __free_page(page);
                        break;
+               }
 
                len -= bytes;
                offset = 0;
index 4673ebe4225534dc9965089ba76ad127963dcb0f..a55389ba877964e5ad69f173ed0a3fa001bb9936 100644 (file)
@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-       blk_qc_t unused;
-
        if (blk_cloned_rq_check_limits(q, rq))
                return BLK_STS_IOERR;
 
@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+       return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
index 40905539afed347ebb7882d7e02c824c096e16ce..aa6bc5c0264388a549956c3f8acb57c1d144fb5f 100644 (file)
@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                 * busy in case of 'none' scheduler, and this way may save
                 * us one extra enqueue & dequeue to sw queue.
                 */
-               if (!hctx->dispatch_busy && !e && !run_queue_async)
+               if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
-               else
-                       blk_mq_insert_requests(hctx, ctx, list);
+                       if (list_empty(list))
+                               return;
+               }
+               blk_mq_insert_requests(hctx, ctx, list);
        }
 
        blk_mq_run_hw_queue(hctx, run_queue_async);
index 3ff3d7b4996973458fa44a89133ed4ec5b65b2d4..fc60ed7e940ead5ae7d7332ee9f64b9ffe922aca 100644 (file)
@@ -654,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+void blk_mq_complete_request_sync(struct request *rq)
+{
+       WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+       rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
 int blk_mq_request_started(struct request *rq)
 {
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -1711,11 +1718,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        unsigned int depth;
 
        list_splice_init(&plug->mq_list, &list);
-       plug->rq_count = 0;
 
        if (plug->rq_count > 2 && plug->multiple_queues)
                list_sort(NULL, &list, plug_rq_cmp);
 
+       plug->rq_count = 0;
+
        this_q = NULL;
        this_hctx = NULL;
        this_ctx = NULL;
@@ -1800,74 +1808,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                                struct request *rq,
                                                blk_qc_t *cookie,
-                                               bool bypass, bool last)
+                                               bool bypass_insert, bool last)
 {
        struct request_queue *q = rq->q;
        bool run_queue = true;
-       blk_status_t ret = BLK_STS_RESOURCE;
-       int srcu_idx;
-       bool force = false;
 
-       hctx_lock(hctx, &srcu_idx);
        /*
-        * hctx_lock is needed before checking quiesced flag.
+        * RCU or SRCU read lock is needed before checking quiesced flag.
         *
-        * When queue is stopped or quiesced, ignore 'bypass', insert
-        * and return BLK_STS_OK to caller, and avoid driver to try to
-        * dispatch again.
+        * When queue is stopped or quiesced, ignore 'bypass_insert' from
+        * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+        * and avoid driver to try to dispatch again.
         */
-       if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+       if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
                run_queue = false;
-               bypass = false;
-               goto out_unlock;
+               bypass_insert = false;
+               goto insert;
        }
 
-       if (unlikely(q->elevator && !bypass))
-               goto out_unlock;
+       if (q->elevator && !bypass_insert)
+               goto insert;
 
        if (!blk_mq_get_dispatch_budget(hctx))
-               goto out_unlock;
+               goto insert;
 
        if (!blk_mq_get_driver_tag(rq)) {
                blk_mq_put_dispatch_budget(hctx);
-               goto out_unlock;
+               goto insert;
        }
 
-       /*
-        * Always add a request that has been through
-        *.queue_rq() to the hardware dispatch list.
-        */
-       force = true;
-       ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+       return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+       if (bypass_insert)
+               return BLK_STS_RESOURCE;
+
+       blk_mq_request_bypass_insert(rq, run_queue);
+       return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, blk_qc_t *cookie)
+{
+       blk_status_t ret;
+       int srcu_idx;
+
+       might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+       hctx_lock(hctx, &srcu_idx);
+
+       ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               blk_mq_request_bypass_insert(rq, true);
+       else if (ret != BLK_STS_OK)
+               blk_mq_end_request(rq, ret);
+
+       hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+       blk_status_t ret;
+       int srcu_idx;
+       blk_qc_t unused_cookie;
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+       hctx_lock(hctx, &srcu_idx);
+       ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
        hctx_unlock(hctx, srcu_idx);
-       switch (ret) {
-       case BLK_STS_OK:
-               break;
-       case BLK_STS_DEV_RESOURCE:
-       case BLK_STS_RESOURCE:
-               if (force) {
-                       blk_mq_request_bypass_insert(rq, run_queue);
-                       /*
-                        * We have to return BLK_STS_OK for the DM
-                        * to avoid livelock. Otherwise, we return
-                        * the real result to indicate whether the
-                        * request is direct-issued successfully.
-                        */
-                       ret = bypass ? BLK_STS_OK : ret;
-               } else if (!bypass) {
-                       blk_mq_sched_insert_request(rq, false,
-                                                   run_queue, false);
-               }
-               break;
-       default:
-               if (!bypass)
-                       blk_mq_end_request(rq, ret);
-               break;
-       }
 
        return ret;
 }
@@ -1875,20 +1885,22 @@ out_unlock:
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
-       blk_qc_t unused;
-       blk_status_t ret = BLK_STS_OK;
-
        while (!list_empty(list)) {
+               blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
                list_del_init(&rq->queuelist);
-               if (ret == BLK_STS_OK)
-                       ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-                                                       false,
+               ret = blk_mq_request_issue_directly(rq, list_empty(list));
+               if (ret != BLK_STS_OK) {
+                       if (ret == BLK_STS_RESOURCE ||
+                                       ret == BLK_STS_DEV_RESOURCE) {
+                               blk_mq_request_bypass_insert(rq,
                                                        list_empty(list));
-               else
-                       blk_mq_sched_insert_request(rq, false, true, false);
+                               break;
+                       }
+                       blk_mq_end_request(rq, ret);
+               }
        }
 
        /*
@@ -1896,7 +1908,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
         * the driver there was more coming, but that turned out to
         * be a lie.
         */
-       if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
@@ -2003,19 +2015,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        plug->rq_count--;
                }
                blk_add_rq_to_plug(plug, rq);
+               trace_block_plug(q);
 
                blk_mq_put_ctx(data.ctx);
 
                if (same_queue_rq) {
                        data.hctx = same_queue_rq->mq_hctx;
+                       trace_block_unplug(q, 1, true);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-                                       &cookie, false, true);
+                                       &cookie);
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                        !data.hctx->dispatch_busy)) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+               blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -2332,7 +2346,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        return 0;
 
  free_fq:
-       kfree(hctx->fq);
+       blk_free_flush_queue(hctx->fq);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
@@ -3121,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
                }
                if (ret)
                        break;
+               if (q->elevator && q->elevator->type->ops.depth_updated)
+                       q->elevator->type->ops.depth_updated(hctx);
        }
 
        if (!ret)
index d704fc7766f45458fd7f186a0111c859e09baafc..423ea88ab6fbaac08b4fe1367e6a778fdb70641a 100644 (file)
@@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                                               struct request *rq,
-                                               blk_qc_t *cookie,
-                                               bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);
 
index f267633cf13ac83e81d7c1e8c7462e18228a2e38..d18a37629f0537271fcfd95912e0b9f1eac2133e 100644 (file)
@@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
                .psize          = 80,
                .digest         = "\x13\x00\x00\x00\x00\x00\x00\x00"
                                  "\x00\x00\x00\x00\x00\x00\x00\x00",
-       },
+       }, { /* Regression test for overflow in AVX2 implementation */
+               .plaintext      = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff",
+               .psize          = 300,
+               .digest         = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+                                 "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+       }
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
index 62d3aa74277b4d03cb4bd1e7d5cee705864bd41b..5e9d7348c16f784f93ea117d537dbfbfe454a783 100644 (file)
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-       /* Enable the requested GPE */
+       /* Clear the GPE status */
+       status = acpi_hw_clear_gpe(gpe_event_info);
+       if (ACPI_FAILURE(status))
+               return_ACPI_STATUS(status);
 
+       /* Enable the requested GPE */
        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
        return_ACPI_STATUS(status);
 }
index 8638f43cfc3d87184c9a0cc91318f07ab5abff8a..79d86da1c8924a971bacf928008634c8aa3224ca 100644 (file)
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                }
        }
 
+       if (obj_desc->common.type == ACPI_TYPE_REGION) {
+               acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+       }
+
        /* Clear the Node entry in all cases */
 
        node->object = NULL;
index 5a389a4f4f652edda26c109baf5e595bf6325903..f1ed0befe303d241c4537e446daad3726e62dbb4 100644 (file)
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                goto out;
        }
 
+       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+                       cmd_name, out_obj->buffer.length);
+       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+                       out_obj->buffer.pointer,
+                       min_t(u32, 128, out_obj->buffer.length), true);
+
        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                return 0;
        }
 
-       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-                       cmd_name, out_obj->buffer.length);
-       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-                       out_obj->buffer.pointer,
-                       min_t(u32, 128, out_obj->buffer.length), true);
-
        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
index f70de71f79d6a699442f430dfa6606ad18a8f2dc..cddd0fcf622c3314f7115e86124e9dde5a5f98ff 100644 (file)
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (old_data)
-               memcpy(nd_cmd.cmd.old_pass, old_data->data,
-                               sizeof(nd_cmd.cmd.old_pass));
+       memcpy(nd_cmd.cmd.old_pass, old_data->data,
+                       sizeof(nd_cmd.cmd.old_pass));
        memcpy(nd_cmd.cmd.new_pass, new_data->data,
                        sizeof(nd_cmd.cmd.new_pass));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 
        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
-       if (nkey)
-               memcpy(nd_cmd.cmd.passphrase, nkey->data,
-                               sizeof(nd_cmd.cmd.passphrase));
+       memcpy(nd_cmd.cmd.passphrase, nkey->data,
+                       sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
index cb8347500ce2871e5003d8ce45a4014a97b8de3e..e49028a604295937a59761488d431b9f4837f731 100644 (file)
@@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
 
        ret = lock_device_hotplug_sysfs();
        if (ret)
-               goto out;
+               return ret;
 
        nid = memory_add_physaddr_to_nid(phys_addr);
        ret = __add_memory(nid, phys_addr,
index 417a9f15c11631cae518a9924c0e480fd6b85fd2..d7ac09c092f2ac8a5caf8632dca569b38b9472a6 100644 (file)
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
                return -EINVAL;
        }
 
+       if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+               pr_err("null_blk: invalid home_node value\n");
+               g_home_node = NUMA_NO_NODE;
+       }
+
        if (g_queue_mode == NULL_Q_RQ) {
                pr_err("null_blk: legacy IO path no longer available\n");
                return -EINVAL;
index 377a694dc22814b9d040a64a9d3ffd7666f5a6a4..6d415b20fb70651c10aa87af6b7f18e53f0aaddd 100644 (file)
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
                disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
                                                   1, BLK_MQ_F_SHOULD_MERGE);
                if (IS_ERR(disk->queue)) {
+                       put_disk(disk);
                        disk->queue = NULL;
                        continue;
                }
@@ -750,6 +751,8 @@ static int pcd_detect(void)
 
        printk("%s: No CD-ROM drive found\n", name);
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
                blk_cleanup_queue(cd->disk->queue);
                cd->disk->queue = NULL;
                blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
        pcd_probe_capabilities();
 
        if (register_blkdev(major, name)) {
-               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                       if (!cd->disk)
+                               continue;
+
+                       blk_cleanup_queue(cd->disk->queue);
+                       blk_mq_free_tag_set(&cd->tag_set);
                        put_disk(cd->disk);
+               }
                return -EBUSY;
        }
 
@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
        int unit;
 
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
+
                if (cd->present) {
                        del_gendisk(cd->disk);
                        pi_release(cd->pi);
index 103b617cdc3184c0a381e569fbb8d8c81894585c..35e6e271b219ccbe524e60499f78ff65fcb67994 100644 (file)
@@ -762,6 +762,8 @@ static int pf_detect(void)
 
        printk("%s: No ATAPI disk detected\n", name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               if (!pf->disk)
+                       continue;
                blk_cleanup_queue(pf->disk->queue);
                pf->disk->queue = NULL;
                blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
        pf_busy = 0;
 
        if (register_blkdev(major, name)) {
-               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                       if (!pf->disk)
+                               continue;
+                       blk_cleanup_queue(pf->disk->queue);
+                       blk_mq_free_tag_set(&pf->tag_set);
                        put_disk(pf->disk);
+               }
                return -EBUSY;
        }
 
@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
        int unit;
        unregister_blkdev(major, name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               if (!pf->disk)
+                       continue;
+
                if (pf->present)
                        del_gendisk(pf->disk);
 
index 4bc083b7c9b541a0fede52156bdae003c1d678df..2a7ca4a1e6f7bd5e2730b13a30760f9506c6973c 100644 (file)
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
        if (err)
                num_vqs = 1;
 
+       num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;
index 87ccef4bd69e904b1f19403e82403bd5cd13a277..32a21b8d1d85f430a031d342bff3a646c5ffa427 100644 (file)
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
        return 0;
 
 err_read:
+       /* prevent double queue cleanup */
+       ace->gd->queue = NULL;
        put_disk(ace->gd);
 err_alloc_disk:
        blk_cleanup_queue(ace->queue);
index ded198328f216066959825950ebfbe5aef29027d..7db48ae65cd2dc946b6a1757baa20993158b903e 100644 (file)
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
                return 0;
        }
 
+       irq_set_status_flags(irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
                               0, "OOB Wake-on-BT", data);
        if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
        }
 
        data->oob_wake_irq = irq;
-       disable_irq(irq);
        bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
        return 0;
 }
index 72866a004f075b79257c9d2df0c7b5b60852c31e..466ebd84ad1774096ecc45dd9f4ebe13ac785602 100644 (file)
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
        tristate "Siemens R3964 line discipline"
-       depends on TTY
+       depends on TTY && BROKEN
        ---help---
          This driver allows synchronous communication with devices using the
          Siemens R3964 packet protocol. Unless you are dealing with special
index d0ad85900b7948e68d1654a410f2c5dde6e6511e..3a1e6b3ccd10d0203847614e16bafce03808fb30 100644 (file)
@@ -973,6 +973,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
        if (ACPI_SUCCESS(status)) {
                hdp->hd_phys_address = addr.address.minimum;
                hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+               if (!hdp->hd_address)
+                       return AE_ERROR;
 
                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
index ff0b199be4729757743bbd72bff2fc61a842d291..f2411468f33ff44707e45ab34cd359d2c3b5a0f0 100644 (file)
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
                return;
        }
 
-       memset(&p, 0, sizeof(p));
        p.addr = base_addr;
        p.space = space;
        p.regspacing = offset;
index e8ba678347466db181a08768158e56930090aa7b..00bf4b17edbfafb5c9d25cb524f35e8d59c7a074 100644 (file)
@@ -214,6 +214,9 @@ struct ipmi_user {
 
        /* Does this interface receive IPMI events? */
        bool gets_events;
+
+       /* Free must run in process context for RCU cleanup. */
+       struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
        return rv;
 }
 
+static void free_user_work(struct work_struct *work)
+{
+       struct ipmi_user *user = container_of(work, struct ipmi_user,
+                                             remove_work);
+
+       cleanup_srcu_struct(&user->release_barrier);
+       kfree(user);
+}
+
 int ipmi_create_user(unsigned int          if_num,
                     const struct ipmi_user_hndl *handler,
                     void                  *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int          if_num,
        goto out_kfree;
 
  found:
+       INIT_WORK(&new_user->remove_work, free_user_work);
+
        rv = init_srcu_struct(&new_user->release_barrier);
        if (rv)
                goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-       cleanup_srcu_struct(&user->release_barrier);
-       kfree(user);
+
+       /* SRCU cleanup must happen in task context. */
+       schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
index 01946cad3d1381ba7eed020544c775ed9a6e3f5f..682221eebd66101cb67b04e2ac979ab7caeae51c 100644 (file)
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
        char *str;
        char *si_type[SI_MAX_PARMS];
 
+       memset(si_type, 0, sizeof(si_type));
+
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
index d8b77133a83a2a2c59d3d7873db8d4d110f5dfb0..f824563fc28dd091303f89de651589ea5350a64b 100644 (file)
@@ -37,8 +37,8 @@
  *
  * Returns size of the event. If it is an invalid event, returns 0.
  */
-static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
-                               struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+                                  struct tcg_pcr_event *event_header)
 {
        struct tcg_efi_specid_event_head *efispecid;
        struct tcg_event_field *event_field;
index 8856cce5a23b2858b58b69373f4cd89e2f898abb..817ae09a369ec2ba192a68f302205eaef7aadeb5 100644 (file)
@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
        __poll_t mask = 0;
 
        poll_wait(file, &priv->async_wait, wait);
+       mutex_lock(&priv->buffer_mutex);
 
-       if (!priv->response_read || priv->response_length)
+       /*
+        * The response_length indicates if there is still response
+        * (or part of it) to be consumed. Partial reads decrease it
+        * by the number of bytes read, and write resets it the zero.
+        */
+       if (priv->response_length)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;
 
+       mutex_unlock(&priv->buffer_mutex);
        return mask;
 }
 
index 83ece5639f8639e7bb397ec4feeb121006073208..ae1030c9b086de511aa5c2bd0800fdcf6c2a1051 100644 (file)
@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
        if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
                return 0;
 
-       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-               mutex_lock(&chip->tpm_mutex);
-               if (!tpm_chip_start(chip)) {
+       if (!tpm_chip_start(chip)) {
+               if (chip->flags & TPM_CHIP_FLAG_TPM2)
                        tpm2_shutdown(chip, TPM2_SU_STATE);
-                       tpm_chip_stop(chip);
-               }
-               mutex_unlock(&chip->tpm_mutex);
-       } else {
-               rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+               else
+                       rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+               tpm_chip_stop(chip);
        }
 
        return rc;
index 89d6f3736dbf605036e4eefb70efd2ef2ee4f386..f8edbb65eda3564cf99623c852fe474a93803123 100644 (file)
@@ -20,8 +20,7 @@
 #define PROG_ID_MAX            7
 
 #define PROG_STATUS_MASK(id)   (1 << ((id) + 8))
-#define PROG_PRES_MASK         0x7
-#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & layout->pres_mask)
 #define PROG_MAX_RM9200_CSS    3
 
 struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
 {
        struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        unsigned int pckr;
+       unsigned long rate;
 
        regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
 
-       return parent_rate >> PROG_PRES(prog->layout, pckr);
+       if (layout->is_pres_direct)
+               rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+       else
+               rate = parent_rate >> PROG_PRES(layout, pckr);
+
+       return rate;
 }
 
 static int clk_programmable_determine_rate(struct clk_hw *hw,
                                           struct clk_rate_request *req)
 {
+       struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        struct clk_hw *parent;
        long best_rate = -EINVAL;
        unsigned long parent_rate;
-       unsigned long tmp_rate;
+       unsigned long tmp_rate = 0;
        int shift;
        int i;
 
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
                        continue;
 
                parent_rate = clk_hw_get_rate(parent);
-               for (shift = 0; shift < PROG_PRES_MASK; shift++) {
-                       tmp_rate = parent_rate >> shift;
-                       if (tmp_rate <= req->rate)
-                               break;
+               if (layout->is_pres_direct) {
+                       for (shift = 0; shift <= layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate / (shift + 1);
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
+               } else {
+                       for (shift = 0; shift < layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate >> shift;
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
                }
 
                if (tmp_rate > req->rate)
@@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!div)
                return -EINVAL;
 
-       shift = fls(div) - 1;
+       if (layout->is_pres_direct) {
+               shift = div - 1;
 
-       if (div != (1 << shift))
-               return -EINVAL;
+               if (shift > layout->pres_mask)
+                       return -EINVAL;
+       } else {
+               shift = fls(div) - 1;
 
-       if (shift >= PROG_PRES_MASK)
-               return -EINVAL;
+               if (div != (1 << shift))
+                       return -EINVAL;
+
+               if (shift >= layout->pres_mask)
+                       return -EINVAL;
+       }
 
        regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
-                          PROG_PRES_MASK << layout->pres_shift,
+                          layout->pres_mask << layout->pres_shift,
                           shift << layout->pres_shift);
 
        return 0;
@@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
 }
 
 const struct clk_programmable_layout at91rm9200_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 1,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 4,
        .css_mask = 0x7,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
index 672a79bda88c960d7655a600834705152f5a4eaf..a0e5ce9c9b9ea6981948ed102be4f8a870e6a99c 100644 (file)
@@ -71,9 +71,11 @@ struct clk_pll_characteristics {
 };
 
 struct clk_programmable_layout {
+       u8 pres_mask;
        u8 pres_shift;
        u8 css_mask;
        u8 have_slck_mck;
+       u8 is_pres_direct;
 };
 
 extern const struct clk_programmable_layout at91rm9200_programmable_layout;
index 1f70cb164b06f310d867d54797dfd0d87be0a1d3..81943fac4537ef3c8e8d0f611a897b2dd6b8171f 100644 (file)
@@ -125,6 +125,14 @@ static const struct {
          .pll = true },
 };
 
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+       .pres_mask = 0xff,
+       .pres_shift = 4,
+       .css_mask = 0x7,
+       .have_slck_mck = 0,
+       .is_pres_direct = 1,
+};
+
 static void __init sama5d2_pmc_setup(struct device_node *np)
 {
        struct clk_range range = CLK_RANGE(0, 0);
@@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 6, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &sama5d2_programmable_layout);
                if (IS_ERR(hw))
                        goto err_free;
        }
index 1acfa3e3cfb401667fbb19666f1aac2add081826..113d71042199b3d3599da84df07e932b1c7902a9 100644 (file)
@@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 
        switch (pll_clk->type) {
        case PLL_1416X:
-               if (!pll->rate_table)
+               if (!pll_clk->rate_table)
                        init.ops = &clk_pll1416x_min_ops;
                else
                        init.ops = &clk_pll1416x_ops;
index 9628d4e7690bbdc632f2930bd6feb9653347595b..85daf826619ab4fe707b03211243279a06464625 100644 (file)
@@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate(
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.flags = CLK_SET_RATE_PARENT;
+       init.flags = flags | CLK_SET_RATE_PARENT;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;
        init.ops = ops;
-       init.flags = flags;
 
        cg->regmap = regmap;
        cg->set_ofs = set_ofs;
index 41e16dd7272a5943c842eda7064bd6c0394c3664..7a14ac9b2fecfece592807d72c38043bb84c12e4 100644 (file)
@@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate,
                        return true;
        } else {
                /* Round down */
-               if (now < rate && best < now)
+               if (now <= rate && best < now)
                        return true;
        }
 
index 0e1ce8c03259b73221266de7aa6491be331815f3..f7b11e1eeebe894c26425067fb5cf14cab09a029 100644 (file)
@@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
 /* VPU Clock */
 
 static const char * const g12a_vpu_parent_names[] = {
-       "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+       "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
        "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
 };
 
 static struct clk_regmap g12a_vpu_0_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 9,
        },
        .hw.init = &(struct clk_init_data){
@@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = {
 static struct clk_regmap g12a_vpu_1_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 25,
        },
        .hw.init = &(struct clk_init_data){
index 04df2e208ed6ec7a493f62debdbf9051e0528038..29ffb4fde7145adefe855c11283825e1e4ea1f11 100644 (file)
@@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
                .offset = HHI_VDEC_CLK_CNTL,
                .shift = 0,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_1_div",
@@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
                .offset = HHI_VDEC2_CLK_CNTL,
                .shift = 16,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_hevc_div",
index 08bcc01c0923863790d32dafbc1274bdf2358a28..daff235bc763348a03bebdcd9695cc8e08856744 100644 (file)
@@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
        div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
                             meson_parm_read(clk->map, &pll_div->sel));
        if (!div || !div->divider) {
-               pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
-               return parent_rate;
+               pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+               return 0;
        }
 
        return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
index d977193842dfed1fead553dd2240013c5a0d380a..19174835693b91cd473b59c7fed787f128f268d1 100644 (file)
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
 };
 
 static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
-                                       void __iomem *base,
+                                       const struct pmc_clk_data *pmc_data,
                                        const char **parent_names,
                                        int num_parents)
 {
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
        init.num_parents = num_parents;
 
        pclk->hw.init = &init;
-       pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+       pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);
 
+       /*
+        * On some systems, the pmc_plt_clocks already enabled by the
+        * firmware are being marked as critical to avoid them being
+        * gated by the clock framework.
+        */
+       if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+               init.flags |= CLK_IS_CRITICAL;
+
        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
                return PTR_ERR(parent_names);
 
        for (i = 0; i < PMC_CLK_NUM; i++) {
-               data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+               data->clks[i] = plt_clk_register(pdev, i, pmc_data,
                                                 parent_names, data->nparents);
                if (IS_ERR(data->clks[i])) {
                        err = PTR_ERR(data->clks[i]);
index 171502a356aa1fb19bf285cdc5aade19ab861723..4b3d143f0f8a4445df12fcf589bb67fdfaa985df 100644 (file)
@@ -145,6 +145,7 @@ config VT8500_TIMER
 config NPCM7XX_TIMER
        bool "NPCM7xx timer driver" if COMPILE_TEST
        depends on HAS_IOMEM
+       select TIMER_OF
        select CLKSRC_MMIO
        help
          Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
index aa4ec53281cea585214c3a5f8b4faf941e7e7bd3..ea373cfbcecb5d8241f6a176a4a32a86a630c083 100644 (file)
@@ -9,7 +9,7 @@
  * published by the Free Software Foundation.
  */
 
-#define pr_fmt(fmt)    "arm_arch_timer: " fmt
+#define pr_fmt(fmt)    "arch_timer: " fmt
 
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -33,9 +33,6 @@
 
 #include <clocksource/arm_arch_timer.h>
 
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
 #define CNTTIDR                0x08
 #define CNTTIDR_VIRT(n)        (BIT(1) << ((n) * 4))
 
index eed6feff8b5f23673de989932afcd806e858ecfc..30c6f4ce672b3b1ac16645159398c66e3129aae9 100644 (file)
@@ -296,4 +296,4 @@ err_alloc:
 TIMER_OF_DECLARE(ox810se_rps,
                       "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
 TIMER_OF_DECLARE(ox820_rps,
-                      "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+                      "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
index 3352da6ed61f39139eb46aba585dfa0136697e80..ee8ec5a8cb1668aa770fb0c99af9dd1dc89a3ad8 100644 (file)
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
        return 0;
 }
 
-/* Optimized set_load which removes costly spin wait in timer_start */
-static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
-                                       int autoreload, unsigned int load)
-{
-       u32 l;
-
-       if (unlikely(!timer))
-               return -EINVAL;
-
-       omap_dm_timer_enable(timer);
-
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-       if (autoreload) {
-               l |= OMAP_TIMER_CTRL_AR;
-               omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
-       } else {
-               l &= ~OMAP_TIMER_CTRL_AR;
-       }
-       l |= OMAP_TIMER_CTRL_ST;
-
-       __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
-       /* Save the context */
-       timer->context.tclr = l;
-       timer->context.tldr = load;
-       timer->context.tcrr = load;
-       return 0;
-}
 static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
                                   unsigned int match)
 {
index b599c7318aab4ea9302779b8527ece4a91151914..2986119dd31fb8391e3256a88942272870acad44 100644 (file)
@@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void)
        const struct x86_cpu_id *id;
        int rc;
 
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
        if (no_load)
                return -ENODEV;
 
@@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void)
        } else {
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
-                       pr_info("CPU ID not supported\n");
+                       pr_info("CPU model not supported\n");
                        return -ENODEV;
                }
 
index b1eadc6652b5f236897811357bd49ac364498f5e..7205d9f4029e11adb7f49d0e57bfdbf11b069fc0 100644 (file)
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
                if (ret)
                        goto unmap_ctx;
 
-               if (mapped_nents) {
+               if (mapped_nents)
                        sg_to_sec4_sg_last(req->src, mapped_nents,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
-                       if (*next_buflen)
-                               scatterwalk_map_and_copy(next_buf, req->src,
-                                                        to_hash - *buflen,
-                                                        *next_buflen, 0);
-               } else {
+               else
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);
-               }
 
+               if (*next_buflen)
+                       scatterwalk_map_and_copy(next_buf, req->src,
+                                                to_hash - *buflen,
+                                                *next_buflen, 0);
                desc = edesc->hw_desc;
 
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
index 8e17149655f069ff923e09c943295b469d87d076..540e8cd16ee6ec6ae0205a2f8c6193f129e80825 100644 (file)
@@ -116,7 +116,7 @@ config EXTCON_PALMAS
 
 config EXTCON_PTN5150
        tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
-       depends on I2C && GPIOLIB || COMPILE_TEST
+       depends on I2C && (GPIOLIB || COMPILE_TEST)
        select REGMAP_I2C
        help
          Say Y here to enable support for USB peripheral and USB host
index c0c0b4e4e281c61801bc8df219b927aa869135a6..f240946ed701cb08913b5047e75b99ec4fd4d6f0 100644 (file)
@@ -254,7 +254,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
 
 static int vpd_sections_init(phys_addr_t physaddr)
 {
-       struct vpd_cbmem __iomem *temp;
+       struct vpd_cbmem *temp;
        struct vpd_cbmem header;
        int ret = 0;
 
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
        if (!temp)
                return -ENOMEM;
 
-       memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+       memcpy(&header, temp, sizeof(struct vpd_cbmem));
        memunmap(temp);
 
        if (header.magic != VPD_CBMEM_MAGIC)
index 4f8fb4ecde3419fe8449ddfcea859f17242e6919..79fb302fb9543f93cfb9738700f53e34006e869c 100644 (file)
@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 
                /* No need to recover an evicted BO */
                if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+                   shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
                    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
                        continue;
 
@@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                        break;
 
                if (fence) {
-                       r = dma_fence_wait_timeout(fence, false, tmo);
+                       tmo = dma_fence_wait_timeout(fence, false, tmo);
                        dma_fence_put(fence);
                        fence = next;
-                       if (r <= 0)
+                       if (tmo == 0) {
+                               r = -ETIMEDOUT;
                                break;
+                       } else if (tmo < 0) {
+                               r = tmo;
+                               break;
+                       }
                } else {
                        fence = next;
                }
@@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                tmo = dma_fence_wait_timeout(fence, false, tmo);
        dma_fence_put(fence);
 
-       if (r <= 0 || tmo <= 0) {
-               DRM_ERROR("recover vram bo from shadow failed\n");
+       if (r < 0 || tmo <= 0) {
+               DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
                return -EIO;
        }
 
@@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        struct pci_dev *pdev = adev->pdev;
        enum pci_bus_speed cur_speed;
        enum pcie_link_width cur_width;
+       u32 ret = 1;
 
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        while (pdev) {
                cur_speed = pcie_get_speed_cap(pdev);
                cur_width = pcie_get_width_cap(pdev);
+               ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                                      NULL, &cur_width);
+               if (!ret)
+                       cur_width = PCIE_LNK_WIDTH_RESRV;
 
                if (cur_speed != PCI_SPEED_UNKNOWN) {
                        if (*speed == PCI_SPEED_UNKNOWN)
index 0b8ef2d27d6b2b8e60e0959f0cb8e742e4de3c7f..fe393a46f8811dc452dc3db6d062d0aa850e6b47 100644 (file)
@@ -35,6 +35,7 @@
 #include "amdgpu_trace.h"
 
 #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT        msecs_to_jiffies(2000)
 
 /*
  * IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                 * cost waiting for it coming back under RUNTIME only
                */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+       } else if (adev->gmc.xgmi.hive_id) {
+               tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }
 
        for (i = 0; i < adev->num_rings; ++i) {
index d0309e8c9d12cdafa95d2a23e84018f4bb6b8035..a11db2b1a63f41e16acd4df34a24b2f3e6db9140 100644 (file)
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       adev->gfx.rlc.funcs->reset(adev);
-
        gfx_v9_0_init_pg(adev);
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
index d0d966d6080a6dda87d57d2d8ee1ed2b58a1444a..1696644ec022391d24b93df9f1dacd23079bd72e 100644 (file)
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
+       WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 
        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
index 8be9677c0c07dae65e3c69dafab241e3b16ff975..cf9a49f49d3a41a99bb96dc75c38f24e72579367 100644 (file)
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
        { 0x15DD, &raven_device_info },         /* Raven */
+       { 0x15D8, &raven_device_info },         /* Raven */
 #endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
index 81127f7d6ed193c9cb996b685577a807f5e5646e..3082b55b1e774fd31b4293c402c41174df28e9a9 100644 (file)
@@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;
 
+       memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
index c68fbd55db3ca6f01c49b86e60a584dfe8d90ff4..a6cda201c964c5bc918e8d693c2aa2fccf65eb58 100644 (file)
@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
                return UPDATE_TYPE_FULL;
        }
 
+       if (u->surface->force_full_update) {
+               update_flags->bits.full_update = 1;
+               return UPDATE_TYPE_FULL;
+       }
+
        type = get_plane_info_update_type(u);
        elevate_update_type(&overall_type, type);
 
@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
                }
 
                dc_resource_state_copy_construct(state, context);
+
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+                       struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+                       if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+                               new_pipe->plane_state->force_full_update = true;
+               }
        }
 
 
@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
                dc->current_state = context;
                dc_release_state(old);
 
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+                               pipe_ctx->plane_state->force_full_update = false;
+               }
        }
        /*let's use current_state to update watermark etc*/
        if (update_type >= UPDATE_TYPE_FULL)
index 4eba3c4800b63bef00ec9fd532919aa84ca72126..ea18e9c2d8cea5c65582274a297d67b1d0fbb82d 100644 (file)
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
        struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        core_dc->hwss.blank_stream(pipe_ctx);
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               dal_ddc_service_write_scdc_data(
+                       stream->link->ddc, 0,
+                       stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
        core_dc->hwss.disable_stream(pipe_ctx, option);
 
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
index 1a7fd6aa77ebb213168cd477452ac1dd94d5aadc..0515095574e735e0535ee17ce3369168557c201e 100644 (file)
@@ -503,6 +503,9 @@ struct dc_plane_state {
        struct dc_plane_status status;
        struct dc_context *ctx;
 
+       /* HACK: Workaround for forcing full reprogramming under some conditions */
+       bool force_full_update;
+
        /* private to dc_surface.c */
        enum dc_irq_source irq_source;
        struct kref refcount;
index 4febf4ef7240e6aef2610063b55f3aee636bbca0..4fe3664fb49508e7f9c07ddc69f5b610fd884d1d 100644 (file)
@@ -190,6 +190,12 @@ static void submit_channel_request(
                                1,
                                0);
        }
+
+       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+                               10, aux110->timeout_period/10);
+
        /* set the delay and the number of bytes to write */
 
        /* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
                }
        }
 
-       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
-       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
-                               10, aux110->timeout_period/10);
        REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
index d27f22c05e4b5abd0085fb7252fccba5296f6831..e28ed6a00ff4236ffaef4346528dc1ecbb179543 100644 (file)
@@ -71,11 +71,11 @@ enum {      /* This is the timeout as defined in DP 1.2a,
         * at most within ~240usec. That means,
         * increasing this timeout will not affect normal operation,
         * and we'll timeout after
-        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
         * This timeout is especially important for
-        * resume from S3 and CTS.
+        * converters, resume from S3, and CTS.
         */
-       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
 };
 
 struct dce_aux {
index 683829466a44c4279db97fbb72338fef9851b404..0ba68d41b9c37b91064a5defbbcf62a3160df53f 100644 (file)
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
        REG_UPDATE(CURSOR_CONTROL,
                        CURSOR_ENABLE, cur_en);
 
-       //account for cases where we see negative offset relative to overlay plane
-       if (src_x_offset < 0 && src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, 0);
-               x_hotspot -= src_x_offset;
-               y_hotspot -= src_y_offset;
-       } else if (src_x_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, pos->y);
-               x_hotspot -= src_x_offset;
-       } else if (src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
+       REG_SET_2(CURSOR_POSITION, 0,
                        CURSOR_X_POSITION, pos->x,
-                       CURSOR_Y_POSITION, 0);
-               y_hotspot -= src_y_offset;
-       } else {
-               REG_SET_2(CURSOR_POSITION, 0,
-                               CURSOR_X_POSITION, pos->x,
-                               CURSOR_Y_POSITION, pos->y);
-       }
+                       CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
                        CURSOR_HOT_SPOT_X, x_hotspot,
index 9aa7bec1b5fe6f3aeb67da66d4b88b2e16966bbb..23b5b94a4939ac809c40448f1aa33e5d1500f93e 100644 (file)
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         *   MP0CLK DS
         */
        data->registry_data.disallowed_features = 0xE0041C00;
+       /* ECC feature should be disabled on old SMUs */
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+       hwmgr->smu_version = smum_get_argument(hwmgr);
+       if (hwmgr->smu_version < 0x282100)
+               data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
        data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
        data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+       data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
                data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                                "FCLK_DS",
                                "MP1CLK_DS",
                                "MP0CLK_DS",
-                               "XGMI"};
+                               "XGMI",
+                               "ECC"};
        static const char *output_title[] = {
                                "FEATURES",
                                "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        struct vega20_single_dpm_table *dpm_table;
        bool vblank_too_short = false;
        bool disable_mclk_switching;
+       bool disable_fclk_switching;
        uint32_t i, latency;
 
        disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        if (hwmgr->display_config->nb_pstate_switch_disable)
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+       if ((disable_mclk_switching &&
+           (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+            hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+               disable_fclk_switching = true;
+       else
+               disable_fclk_switching = false;
+
        /* fclk */
        dpm_table = &(data->dpm_table.fclk_table);
        dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
        dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-       if (hwmgr->display_config->nb_pstate_switch_disable)
+       if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
        /* vclk */
index a5bc758ae09728327bd1230dbbf9969460eba263..ac2a3118a0ae779224be91fd750dda2319027a64 100644 (file)
@@ -80,6 +80,7 @@ enum {
        GNLD_DS_MP1CLK,
        GNLD_DS_MP0CLK,
        GNLD_XGMI,
+       GNLD_ECC,
 
        GNLD_FEATURES_MAX
 };
index 63d5cf69154967b90aa696de2ae5c1d407bd579f..195c4ae67058554d1d2bf194cedf9762f79a96e3 100644 (file)
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT            )
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT          )
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT          )
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT               )
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT               )
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT                )
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID             0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID               0x00000002
index a63e5f0dae56ad3de5a372ecd167a350d5b89457..db761329a1e3ef19d2fa05f86fdaf5b3b06c6b53 100644 (file)
@@ -1037,6 +1037,31 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
 
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+       struct drm_display_info *display = &hdmi->connector.display_info;
+
+       /* Completely disable SCDC support for older controllers */
+       if (hdmi->version < 0x200a)
+               return false;
+
+       /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+       if (!display->hdmi.scdc.supported ||
+           !display->hdmi.scdc.scrambling.supported)
+               return false;
+
+       /*
+        * Disable if display only support low TMDS rates and scrambling
+        * for low rates is not supported either
+        */
+       if (!display->hdmi.scdc.scrambling.low_rates &&
+           display->max_tmds_clock <= 340000)
+               return false;
+
+       return true;
+}
+
 /*
  * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
  * - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1080,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
        unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
 
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
-       if (hdmi->connector.display_info.hdmi.scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
                        drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
                else
@@ -1579,8 +1604,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
        /* Set up HDMI_FC_INVIDCONF */
        inv_val = (hdmi->hdmi_data.hdcp_enable ||
-                  vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
-                  hdmi_info->scdc.scrambling.low_rates ?
+                  (dw_hdmi_support_scdc(hdmi) &&
+                   (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+                    hdmi_info->scdc.scrambling.low_rates)) ?
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
@@ -1646,7 +1672,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        }
 
        /* Scrambling Control */
-       if (hdmi_info->scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
                    hdmi_info->scdc.scrambling.low_rates) {
                        /*
index 40ac1984803459b7a0e8f67e09f81b61820035ef..fbb76332cc9f149c0cc037a6d35a5ed9c63d1baa 100644 (file)
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                        funcs->atomic_disable(crtc, old_crtc_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
-               else
+               else if (funcs->dpms)
                        funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
                if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                if (new_crtc_state->enable) {
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
                                         crtc->base.id, crtc->name);
-
                        if (funcs->atomic_enable)
                                funcs->atomic_enable(crtc, old_crtc_state);
-                       else
+                       else if (funcs->commit)
                                funcs->commit(crtc);
                }
        }
index 035479e273beca866575c4bef70438583029d2df..e3f9caa7839f7347e1eaa25a798c6574446e813f 100644 (file)
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
index 3e7e2b80c8579017cecdda478bc6166e1f46e061..69a9a1b2ea4ac44ba7d8f6530f99f59a9958076f 100644 (file)
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
-       int ret;
+       int ret, tile_height = 1;
 
        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
-
-               info->size = (((p.stride * p.height * p.bpp) / 8) +
-                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
-
-               info->size = (((info->stride * c.height * c.bpp) / 8)
-                               + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }
 
+       info->size = (info->stride * roundup(info->height, tile_height)
+                     + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
index d7052ab7908c8d9c7872df64cb5cd68ec8f13b4e..9814773882ec2b875ae2db00a22768deed72c618 100644 (file)
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-       struct intel_vgpu_ppgtt_spt *spt;
+       struct intel_vgpu_ppgtt_spt *spt, *spn;
        struct radix_tree_iter iter;
-       void **slot;
+       LIST_HEAD(all_spt);
+       void __rcu **slot;
 
+       rcu_read_lock();
        radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
                spt = radix_tree_deref_slot(slot);
-               ppgtt_free_spt(spt);
+               list_move(&spt->post_shadow_list, &all_spt);
        }
+       rcu_read_unlock();
+
+       list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+               ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
@@ -1946,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-       atomic_dec(&mm->pincount);
+       atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
index d5fcc447d22f0d0663a4664c5767d72fa199cb0d..a68addf95c230f2edcc9b5b21860e9aee406bc27 100644 (file)
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                void *buf, unsigned long count, bool is_write)
 {
-       void *aperture_va;
+       void __iomem *aperture_va;
 
        if (!intel_vgpu_in_aperture(vgpu, off) ||
            !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                return -EIO;
 
        if (is_write)
-               memcpy(aperture_va + offset_in_page(off), buf, count);
+               memcpy_toio(aperture_va + offset_in_page(off), buf, count);
        else
-               memcpy(buf, aperture_va + offset_in_page(off), count);
+               memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 
        io_mapping_unmap(aperture_va);
 
index 159192c097cc7eb7424070e8cec052f3f5e5b1f7..05b953793316b28ac1fb19c902474e468ba828b0 100644 (file)
@@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                intel_runtime_pm_put_unchecked(dev_priv);
        }
 
-       if (ret && (vgpu_is_vm_unhealthy(ret))) {
-               enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+       if (ret) {
+               if (vgpu_is_vm_unhealthy(ret))
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }
index 0bd890c04fe4f7c911bd9bde1a79af11af08ff5c..f6f6e5b78e9784c0ffee5f7132a8ddd2a9339954 100644 (file)
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                       &ctx);
                if (ret) {
-                       ret = -EINTR;
+                       if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+                               try_again = true;
+                               continue;
+                       }
                        break;
                }
                crtc = connector->state->crtc;
index 73a7bee24a663faa672ade21cfe7ea7cb1bc4b46..641e0778fa9c4123204f75091df3c53b5162a961 100644 (file)
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
        }
 }
 
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+                                    struct intel_dsi *intel_dsi)
+{
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               WARN_ON(intel_dsi->io_wakeref[port]);
+               intel_dsi->io_wakeref[port] =
+                       intel_display_power_get(dev_priv,
+                                               port == PORT_A ?
+                                               POWER_DOMAIN_PORT_DDI_A_IO :
+                                               POWER_DOMAIN_PORT_DDI_B_IO);
+       }
+}
+
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
                I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
        }
 
-       for_each_dsi_port(port, intel_dsi->ports) {
-               intel_dsi->io_wakeref[port] =
-                       intel_display_power_get(dev_priv,
-                                               port == PORT_A ?
-                                               POWER_DOMAIN_PORT_DDI_A_IO :
-                                               POWER_DOMAIN_PORT_DDI_B_IO);
-       }
+       get_dsi_io_power_domains(dev_priv, intel_dsi);
 }
 
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
                val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
        }
        I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+       }
+       I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
        POSTING_READ(DPCLKA_CFGCR0_ICL);
 
        mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
                        DRM_ERROR("DDI port:%c buffer not idle\n",
                                  port_name(port));
        }
-       gen11_dsi_ungate_clocks(encoder);
+       gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
        return 0;
 }
 
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
-       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       u64 domains = 0;
-       enum port port;
-
-       for_each_dsi_port(port, intel_dsi->ports)
-               if (port == PORT_A)
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
-               else
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
-       return domains;
+       get_dsi_io_power_domains(to_i915(encoder->base.dev),
+                                enc_to_intel_dsi(&encoder->base));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
index 14d580cdefd3e875e08b7af0be350d4f877fb7ef..ab4e60dfd6a3460001cbcae4691f1ede8ebb230e 100644 (file)
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
                                              intel_aux_power_domain(dig_port);
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port;
-       u64 domains;
 
        /*
         * TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         * hook.
         */
        if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
-               return 0;
+               return;
 
        dig_port = enc_to_dig_port(&encoder->base);
-       domains = BIT_ULL(dig_port->ddi_io_power_domain);
+       intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
        /*
         * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         */
        if (intel_crtc_has_dp_encoder(crtc_state) ||
            intel_port_is_tc(dev_priv, encoder->port))
-               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+               intel_display_power_get(dev_priv,
+                                       intel_ddi_main_link_aux_domain(dig_port));
 
        /*
         * VDSC power is needed when DSC is enabled
         */
        if (crtc_state->dsc_params.compression_enable)
-               domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
-       return domains;
+               intel_display_power_get(dev_priv,
+                                       intel_dsc_power_domain(crtc_state));
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
                                return;
                }
                /*
-                * DSI ports should have their DDI clock ungated when disabled
-                * and gated when enabled.
+                * For DSI we keep the ddi clocks gated
+                * except during enable/disable sequence.
                 */
-               ddi_clk_needed = !encoder->base.crtc;
+               ddi_clk_needed = false;
        }
 
        val = I915_READ(DPCLKA_CFGCR0_ICL);
index ccb616351bba725052ea3a752cc5263744d58af2..421aac80a83815b9c1cfa40a7142e171cfd8bcd5 100644 (file)
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
        struct intel_encoder *encoder;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               u64 get_domains;
-               enum intel_display_power_domain domain;
                struct intel_crtc_state *crtc_state;
 
                if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
                        continue;
 
                crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-               get_domains = encoder->get_power_domains(encoder, crtc_state);
-               for_each_power_domain(domain, get_domains)
-                       intel_display_power_get(dev_priv, domain);
+               encoder->get_power_domains(encoder, crtc_state);
        }
 }
 
index cf709835fb9a9eece3c0761c21c53c34a25b7e22..8891f29a8c7fffacad25f29e718376aa164261f7 100644 (file)
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
        return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *pipe_config,
-                                 const struct link_config_limits *limits)
-{
-       struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int bpp, clock, lane_count;
-       int mode_rate, link_clock, link_avail;
-
-       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  bpp);
-
-               for (lane_count = limits->min_lane_count;
-                    lane_count <= limits->max_lane_count;
-                    lane_count <<= 1) {
-                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-                               link_clock = intel_dp->common_rates[clock];
-                               link_avail = intel_dp_max_data_rate(link_clock,
-                                                                   lane_count);
-
-                               if (mode_rate <= link_avail) {
-                                       pipe_config->lane_count = lane_count;
-                                       pipe_config->pipe_bpp = bpp;
-                                       pipe_config->port_clock = link_clock;
-
-                                       return 0;
-                               }
-                       }
-               }
-       }
-
-       return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
        int i, num_bpc;
@@ -2031,15 +1995,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        limits.min_bpp = 6 * 3;
        limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-       if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+       if (intel_dp_is_edp(intel_dp)) {
                /*
                 * Use the maximum clock and number of lanes the eDP panel
-                * advertizes being capable of. The eDP 1.3 and earlier panels
-                * are generally designed to support only a single clock and
-                * lane configuration, and typically these values correspond to
-                * the native resolution of the panel. With eDP 1.4 rate select
-                * and DSC, this is decreasingly the case, and we need to be
-                * able to select less than maximum link config.
+                * advertizes being capable of. The panels are generally
+                * designed to support only a single clock and lane
+                * configuration, and typically these values correspond to the
+                * native resolution of the panel.
                 */
                limits.min_lane_count = limits.max_lane_count;
                limits.min_clock = limits.max_clock;
@@ -2053,22 +2015,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                      intel_dp->common_rates[limits.max_clock],
                      limits.max_bpp, adjusted_mode->crtc_clock);
 
-       if (intel_dp_is_edp(intel_dp))
-               /*
-                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-                * section A.1: "It is recommended that the minimum number of
-                * lanes be used, using the minimum link rate allowed for that
-                * lane configuration."
-                *
-                * Note that we use the max clock and lane count for eDP 1.3 and
-                * earlier, and fast vs. wide is irrelevant.
-                */
-               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-                                                       &limits);
-       else
-               /* Optimize for slow and wide. */
-               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-                                                       &limits);
+       /*
+        * Optimize for slow and wide. This is the place to add alternative
+        * optimization policy.
+        */
+       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
        /* enable compression if the mode doesn't fit available BW */
        DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
index 15db41394b9ed75d9de8545c8a0faff9efbc9a9b..d5660ac1b0d60999d8788710a0140ec2f8d38570 100644 (file)
@@ -270,10 +270,12 @@ struct intel_encoder {
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_state *pipe_config);
-       /* Returns a mask of power domains that need to be referenced as part
-        * of the hardware state readout code. */
-       u64 (*get_power_domains)(struct intel_encoder *encoder,
-                                struct intel_crtc_state *crtc_state);
+       /*
+        * Acquires the power domains needed for an active encoder during
+        * hardware state readout.
+        */
+       void (*get_power_domains)(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *crtc_state);
        /*
         * Called during system suspend after all pending requests for the
         * encoder are flushed (for example for DP AUX transactions) and
index 6403728fe7784f54977b0c318d790ea886553a04..31c93c3ccd00ffa62c3158d159d7cc4afd8f9ae5 100644 (file)
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 tmp;
+
+       tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+       case PIPEMISC_DITHER_6_BPC:
+               return 18;
+       case PIPEMISC_DITHER_8_BPC:
+               return 24;
+       case PIPEMISC_DITHER_10_BPC:
+               return 30;
+       case PIPEMISC_DITHER_12_BPC:
+               return 36;
+       default:
+               MISSING_CASE(tmp);
+               return 0;
+       }
+}
+
 static int intel_dsi_compute_config(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        bpp = mipi_dsi_pixel_format_to_bpp(
                        pixel_format_from_register_bits(fmt));
 
+       pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
        /* Enable Frame time stamo based scanline reporting */
        adjusted_mode->private_flags |=
                        I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
index 22e68a100e7beeaf752efde0235dddc7e13b9683..5d333138f9136b6e8b98238a9851085dd669c236 100644 (file)
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
 static unsigned int mt2701_calculate_factor(int clock)
 {
        if (clock <= 64000)
-               return 16;
-       else if (clock <= 128000)
-               return 8;
-       else if (clock <= 256000)
                return 4;
-       else
+       else if (clock <= 128000)
                return 2;
+       else
+               return 1;
 }
 
 static const struct mtk_dpi_conf mt8173_conf = {
index cf59ea9bccfdf659ca042df67cf437a5da4a1078..57ce4708ef1b9a420cd9862d3c7b7d624a7efa3f 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
        .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+       .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+       .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
        .fops = &mtk_drm_fops,
 
        .name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
+       ret = drm_fbdev_generic_setup(drm, 32);
+       if (ret)
+               DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
        return 0;
 
 err_deinit:
index 259b7b0de1d22d7beb052c19e2ef06e4859afcff..38483e9ee071223228e3b9bc493dc27799390411 100644 (file)
@@ -241,3 +241,49 @@ err_gem_free:
        kfree(mtk_gem);
        return ERR_PTR(ret);
 }
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+       struct sg_table *sgt;
+       struct sg_page_iter iter;
+       unsigned int npages;
+       unsigned int i = 0;
+
+       if (mtk_gem->kvaddr)
+               return mtk_gem->kvaddr;
+
+       sgt = mtk_gem_prime_get_sg_table(obj);
+       if (IS_ERR(sgt))
+               return NULL;
+
+       npages = obj->size >> PAGE_SHIFT;
+       mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+       if (!mtk_gem->pages)
+               goto out;
+
+       for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+               mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+               if (i > npages)
+                       break;
+       }
+       mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+                              pgprot_writecombine(PAGE_KERNEL));
+
+out:
+       kfree((void *)sgt);
+
+       return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+       if (!mtk_gem->pages)
+               return;
+
+       vunmap(vaddr);
+       mtk_gem->kvaddr = 0;
+       kfree((void *)mtk_gem->pages);
+}
index 534639b43a1c77c24a5942c6cb627dc32389f425..c047a7ef294fd0391ef01cf15071f2d8b3e98f3f 100644 (file)
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
        dma_addr_t              dma_addr;
        unsigned long           dma_attrs;
        struct sg_table         *sg;
+       struct page             **pages;
 };
 
 #define to_mtk_gem_obj(x)      container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 #endif
index 915cc84621aeaf62516681a87d56e0e9760197d4..e04e6c293d39d189e87cb9bc1d25e4e840750857 100644 (file)
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        if (IS_ERR(regmap))
                ret = PTR_ERR(regmap);
        if (ret) {
-               ret = PTR_ERR(regmap);
                dev_err(dev,
                        "Failed to get system configuration registers: %d\n",
                        ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        of_node_put(remote);
 
        hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+       of_node_put(i2c_np);
        if (!hdmi->ddc_adpt) {
                dev_err(dev, "Failed to get ddc i2c adapter by node\n");
                return -EINVAL;
index 4ef9c57ffd44d4eb6db90dd8cecc0ed02de81295..5223498502c49228839fb993c4c2abd7ccc84a89 100644 (file)
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
        .owner = THIS_MODULE,
 };
 
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       hdmi_phy->pll_rate = rate;
-       if (rate <= 74250000)
-               *parent_rate = rate;
-       else
-               *parent_rate = rate / 2;
-
-       return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       return hdmi_phy->pll_rate;
-}
-
 void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                             u32 bits)
 {
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
                return NULL;
 }
 
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
-                                    const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+                                     struct clk_init_data *clk_init)
 {
-       if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
-               *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
-       else
-               dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+       clk_init->flags = hdmi_phy->conf->flags;
+       clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
 }
 
 static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        struct clk_init_data clk_init = {
                .num_parents = 1,
                .parent_names = (const char * const *)&ref_clk_name,
-               .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        };
 
        struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        hdmi_phy->dev = dev;
        hdmi_phy->conf =
                (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
-       mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+       mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
        hdmi_phy->pll_hw.init = &clk_init;
        hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
        if (IS_ERR(hdmi_phy->pll)) {
index f39b1fc66612944c9b76b8b20f87ef561a0e595b..2d8b3182470dc465b29111e00ee2c9229e4f7c0d 100644 (file)
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
 
 struct mtk_hdmi_phy_conf {
        bool tz_disabled;
+       unsigned long flags;
        const struct clk_ops *hdmi_phy_clk_ops;
        void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
        void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                       u32 val, u32 mask);
 struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate);
 
 extern struct platform_driver mtk_hdmi_phy_driver;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
index fcc42dc6ea7fb81d1005239a52f81bc020b2f57a..d3cc4022e98844601b82928020ec7cf0cfaf1004 100644 (file)
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(80, 100);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 
        if (rate <= 64000000)
                pos_div = 3;
-       else if (rate <= 12800000)
-               pos_div = 1;
+       else if (rate <= 128000000)
+               pos_div = 2;
        else
                pos_div = 1;
 
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
                          RG_HTPLL_IC_MASK);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned long out_rate, val;
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+       switch (val) {
+       case 0x00:
+               out_rate = parent_rate;
+               break;
+       case 0x01:
+               out_rate = parent_rate / 2;
+               break;
+       default:
+               out_rate = parent_rate / 4;
+               break;
+       }
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+       out_rate *= (val + 1) * 2;
+       val = (readl(hdmi_phy->regs + HDMI_CON2)
+              & RG_HDMITX_TX_POSDIV_MASK);
+       out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+       if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+               out_rate /= 5;
+
+       return out_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
        .tz_disabled = true,
+       .flags = CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index ed5916b2765843b09056f0788ce93d190e5a0367..47f8a295168224b525959bafb8f0e631ee799b2b 100644 (file)
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(100, 150);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       hdmi_phy->pll_rate = rate;
+       if (rate <= 74250000)
+               *parent_rate = rate;
+       else
+               *parent_rate = rate / 2;
+
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       return hdmi_phy->pll_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 }
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+       .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index 340383150fb98d24567b6ca74cae0298cda806b6..ebf9c96d43eee56649e510a5ca8c53a045b10c67 100644 (file)
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
                hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
                hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+               REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
                hdmi4_core_disable(core);
                return 0;
        }
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
        if (err)
                return err;
 
+       /*
+        * Initialize CEC clock divider: CEC needs 2MHz clock hence
+        * set the divider to 24 to get 48/24=2MHz clock
+        */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
        /* Clear TX FIFO */
        if (!hdmi_cec_clear_tx_fifo(adap)) {
                pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear RX FIFO */
        if (!hdmi_cec_clear_rx_fifo(adap)) {
                pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
        }
        return 0;
+
+err_disable_clk:
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+       hdmi4_core_disable(core);
+
+       return err;
 }
 
 static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
                return ret;
        core->wp = wp;
 
-       /*
-        * Initialize CEC clock divider: CEC needs 2MHz clock hence
-        * set the devider to 24 to get 48/24=2MHz clock
-        */
-       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+       /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 
        ret = cec_register_adapter(core->adap, &pdev->dev);
        if (ret < 0) {
index 813ba42f27539ce94b85afc20295e411f6b4c123..e384b95ad8573a7ef9ad9bb9a631432eda4a726f 100644 (file)
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
        else
                acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
        /*
-        * The I2S input word length is twice the lenght given in the IEC-60958
+        * The I2S input word length is twice the length given in the IEC-60958
         * status word. If the word size is greater than
         * 20 bits, increment by one.
         */
index dc47720c99ba5689a8c12f217c7859960bfa394d..39d8509d96a0d3162f8bd7c310f4aa0a18898f6b 100644 (file)
@@ -48,8 +48,13 @@ static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
                            const struct drm_display_mode *mode)
 {
-       /* This is max for HDMI 2.0b (4K@60Hz) */
-       if (mode->clock > 594000)
+       /*
+        * Controller support maximum of 594 MHz, which correlates to
+        * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+        * 340 MHz scrambling has to be enabled. Because scrambling is
+        * not yet implemented, just limit to 340 MHz for now.
+        */
+       if (mode->clock > 340000)
                return MODE_CLOCK_HIGH;
 
        return MODE_OK;
index fc36e0c10a374a2a33a054a4102944dfb0b03009..b1e7c76e9c17269664fddd5ab5c90c3477b80a0c 100644 (file)
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 
 err_unregister_gates:
        for (i = 0; i < CLK_NUM; i++)
-               if (clk_data->hws[i])
+               if (!IS_ERR_OR_NULL(clk_data->hws[i]))
                        clk_hw_unregister_gate(clk_data->hws[i]);
        clk_disable_unprepare(tcon_top->bus);
 err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
 
        of_clk_del_provider(dev->of_node);
        for (i = 0; i < CLK_NUM; i++)
-               clk_hw_unregister_gate(clk_data->hws[i]);
+               if (clk_data->hws[i])
+                       clk_hw_unregister_gate(clk_data->hws[i]);
 
        clk_disable_unprepare(tcon_top->bus);
        reset_control_assert(tcon_top->rst);
index 47c55974756d576b71193219b92d976078006b4e..d23c4bfde790ca0864722e8b1eb2907b7c9ee24f 100644 (file)
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        hdmi->dvi = !tegra_output_is_hdmi(output);
        if (!hdmi->dvi) {
-               err = tegra_hdmi_setup_audio(hdmi);
-               if (err < 0)
-                       hdmi->dvi = true;
+               /*
+                * Make sure that the audio format has been configured before
+                * enabling audio, otherwise we may try to divide by zero.
+               */
+               if (hdmi->format.sample_rate > 0) {
+                       err = tegra_hdmi_setup_audio(hdmi);
+                       if (err < 0)
+                               hdmi->dvi = true;
+               }
        }
 
        if (hdmi->config->has_hda)
index 3f56647cdb35f94ddcead862b286516ad903150e..0fa5034b9f9e05bc0f8511d6f3c6545030699fe6 100644 (file)
@@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                reservation_object_add_shared_fence(bo->resv, fence);
 
                ret = reservation_object_reserve_shared(bo->resv, 1);
-               if (unlikely(ret))
+               if (unlikely(ret)) {
+                       dma_fence_put(fence);
                        return ret;
+               }
 
                dma_fence_put(bo->moving);
                bo->moving = fence;
index f841accc2c0064a3edd865423a10818480477f39..627f8dc91d0ed23e0958dfc39d106c967dcd376a 100644 (file)
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
-                               for (j = 0; j < HPAGE_PMD_NR; ++j)
-                                       if (p++ != pages[i + j])
+                       if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+                           (npages - i) >= HPAGE_PMD_NR) {
+                               for (j = 1; j < HPAGE_PMD_NR; ++j)
+                                       if (++p != pages[i + j])
                                            break;
 
                                if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                unsigned max_size, n2free;
 
                spin_lock_irqsave(&huge->lock, irq_flags);
-               while (i < npages) {
+               while ((npages - i) >= HPAGE_PMD_NR) {
                        struct page *p = pages[i];
                        unsigned j;
 
                        if (!p)
                                break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
+                       for (j = 1; j < HPAGE_PMD_NR; ++j)
+                               if (++p != pages[i + j])
                                    break;
 
                        if (j != HPAGE_PMD_NR)
index 22cd2d13e272f033d3e54b9245986ce22fa74486..ff47f890e6ad8d554fa7180aab449321a34ce5c1 100644 (file)
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .load = udl_driver_load,
        .unload = udl_driver_unload,
+       .release = udl_driver_release,
 
        /* gem hooks */
        .gem_free_object_unlocked = udl_gem_free_object,
index e9e9b1ff678ee0a81d0d4b100b816b19122c7f0c..4ae67d882eae928e6b39fb4f240a46bfc272ed15 100644 (file)
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
 
 int udl_driver_load(struct drm_device *dev, unsigned long flags);
 void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
index 9086d0d1b880de87de7609e55798e534b5cb3039..1f8ef34ade24365bce4f522104be8347eafb1586 100644 (file)
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
                udl_free_urb_list(dev);
 
        udl_fbdev_cleanup(dev);
-       udl_modeset_cleanup(dev);
        kfree(udl);
 }
+
+void udl_driver_release(struct drm_device *dev)
+{
+       udl_modeset_cleanup(dev);
+       drm_dev_fini(dev);
+       kfree(dev);
+}
index 27101c04a8272668988ce5be66dfc584068f8a60..0c0eb43abf657f2369872b3f5026b3a965f4a298 100644 (file)
@@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
 #if HOST1X_HW >= 6
+       u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
        struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
-       u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+       if (spec)
+               sid = spec->ids[0] & 0xffff;
+#endif
 
        host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
 #endif
index 6ca8d322b487279348d90513caa10d6bb6745e40..4ca0cdfa6b33af35f951a3969cf9c02b67d7a1b0 100644 (file)
@@ -150,6 +150,7 @@ config HID_ASUS
        tristate "Asus"
        depends on LEDS_CLASS
        depends on ASUS_WMI || ASUS_WMI=n
+       select POWER_SUPPLY
        ---help---
        Support for Asus notebook built-in keyboard and touchpad via i2c, and
        the Asus Republic of Gamers laptop keyboard special keys.
index 9993b692598fb84d1700e26ef7f97856ff842955..860e21ec6a492a35392f2b47146f5e0a811c1068 100644 (file)
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                        unsigned offset, unsigned n)
 {
-       if (n > 32) {
-               hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+       if (n > 256) {
+               hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
                         n, current->comm);
-               n = 32;
+               n = 256;
        }
 
        return __extract(report, offset, n);
index ac9fda1b5a7233c227cd5517dcf6331a29483a60..1384e57182af978e4329c9e946e3487f85229be5 100644 (file)
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
        seq_printf(f, "\n\n");
 
        /* dump parsed data and input mappings */
+       if (down_interruptible(&hdev->driver_input_lock))
+               return 0;
+
        hid_dump_device(hdev, f);
        seq_printf(f, "\n");
        hid_dump_input_mapping(hdev, f);
 
+       up(&hdev->driver_input_lock);
+
        return 0;
 }
 
index b6d93f4ad037e440d1e5d23d76058e4be606159c..adce58f24f7638a70c170f17a694b5baa7f5a49a 100644 (file)
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E   0x7e7e
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
index b10b1922c5bdf304a4f32100365da00f0f1572f4..b607286a0bc82f360a133b5dce204a8f2441ff5c 100644 (file)
@@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        break;
                }
 
+               if ((usage->hid & 0xf0) == 0xb0) {      /* SC - Display */
+                       switch (usage->hid & 0xf) {
+                       case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+                       default: goto ignore;
+                       }
+                       break;
+               }
+
                /*
                 * Some lazy vendors declare 255 usages for System Control,
                 * leading to the creation of ABS_X|Y axis and too many others.
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x06a: map_key_clear(KEY_GREEN);           break;
                case 0x06b: map_key_clear(KEY_BLUE);            break;
                case 0x06c: map_key_clear(KEY_YELLOW);          break;
-               case 0x06d: map_key_clear(KEY_ZOOM);            break;
+               case 0x06d: map_key_clear(KEY_ASPECT_RATIO);    break;
 
                case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);            break;
                case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);          break;
@@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);          break;
                case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);         break;
 
+               case 0x079: map_key_clear(KEY_KBDILLUMUP);      break;
+               case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);    break;
+               case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);  break;
+
                case 0x082: map_key_clear(KEY_VIDEO_NEXT);      break;
                case 0x083: map_key_clear(KEY_LAST);            break;
                case 0x084: map_key_clear(KEY_ENTER);           break;
@@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x1b8: map_key_clear(KEY_VIDEO);           break;
                case 0x1bc: map_key_clear(KEY_MESSENGER);       break;
                case 0x1bd: map_key_clear(KEY_INFO);            break;
+               case 0x1cb: map_key_clear(KEY_ASSISTANT);       break;
                case 0x201: map_key_clear(KEY_NEW);             break;
                case 0x202: map_key_clear(KEY_OPEN);            break;
                case 0x203: map_key_clear(KEY_CLOSE);           break;
@@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x22d: map_key_clear(KEY_ZOOMIN);          break;
                case 0x22e: map_key_clear(KEY_ZOOMOUT);         break;
                case 0x22f: map_key_clear(KEY_ZOOMRESET);       break;
+               case 0x232: map_key_clear(KEY_FULL_SCREEN);     break;
                case 0x233: map_key_clear(KEY_SCROLLUP);        break;
                case 0x234: map_key_clear(KEY_SCROLLDOWN);      break;
                case 0x238: /* AC Pan */
@@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);   break;
                case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);   break;
 
+               case 0x29f: map_key_clear(KEY_SCALE);           break;
+
                default: map_key_clear(KEY_UNKNOWN);
                }
                break;
index 15ed6177a7a364d6b2634babe0df1be83b4cec7b..199cc256e9d9d3903016f64f66b36b909a9c1109 100644 (file)
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                kfree(data);
                return -ENOMEM;
        }
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       if (!data->wq) {
+               kfree(data->effect_ids);
+               kfree(data);
+               return -ENOMEM;
+       }
+
        data->hidpp = hidpp;
        data->feature_index = feature_index;
        data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        /* ignore boost value at response.fap.params[2] */
 
        /* init the hardware command queue */
-       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
        atomic_set(&data->workqueue_size, 0);
 
        /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_rel(mydata->input, REL_Y, v);
 
                v = hid_snto32(data[6], 8);
-               hidpp_scroll_counter_handle_scroll(
-                               &hidpp->vertical_wheel_counter, v);
+               if (v != 0)
+                       hidpp_scroll_counter_handle_scroll(
+                                       &hidpp->vertical_wheel_counter, v);
 
                input_sync(mydata->input);
        }
index 953908f2267c0653478cf88d53c7e85fdb121d76..77ffba48cc737e0df69892cfcceaafacb9815534 100644 (file)
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { }
 };
 
-/**
+/*
  * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
  *
  * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
                if (hdev->product == 0x0401 &&
                    strncmp(hdev->name, "ELAN0800", 8) != 0)
                        return true;
+               /* Same with product id 0x0400 */
+               if (hdev->product == 0x0400 &&
+                   strncmp(hdev->name, "QTEC0001", 8) != 0)
+                       return true;
                break;
        }
 
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
        }
 
        if (bl_entry != NULL)
-               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        bl_entry->driver_data, bl_entry->vendor,
                        bl_entry->product);
 
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
                quirks |= bl_entry->driver_data;
 
        if (quirks)
-               dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        quirks, hdev->vendor, hdev->product);
        return quirks;
 }
index 8141cadfca0e3c3ce62eccff1c28cd94031827b1..8dae0f9b819e011d6695462fea7e88e85cd16669 100644 (file)
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
 static int steam_register(struct steam_device *steam)
 {
        int ret;
+       bool client_opened;
 
        /*
         * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
                 * Unlikely, but getting the serial could fail, and it is not so
                 * important, so make up a serial number and go on.
                 */
+               mutex_lock(&steam->mutex);
                if (steam_get_serial(steam) < 0)
                        strlcpy(steam->serial_no, "XXXXXXXXXX",
                                        sizeof(steam->serial_no));
+               mutex_unlock(&steam->mutex);
 
                hid_info(steam->hdev, "Steam Controller '%s' connected",
                                steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
        }
 
        mutex_lock(&steam->mutex);
-       if (!steam->client_opened) {
+       client_opened = steam->client_opened;
+       if (!client_opened)
                steam_set_lizard_mode(steam, lizard_mode);
+       mutex_unlock(&steam->mutex);
+
+       if (!client_opened)
                ret = steam_input_register(steam);
-       } else {
+       else
                ret = 0;
-       }
-       mutex_unlock(&steam->mutex);
 
        return ret;
 }
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
 {
        struct steam_device *steam = hdev->driver_data;
 
+       unsigned long flags;
+       bool connected;
+
+       spin_lock_irqsave(&steam->lock, flags);
+       connected = steam->connected;
+       spin_unlock_irqrestore(&steam->lock, flags);
+
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
+       if (connected)
+               steam_set_lizard_mode(steam, lizard_mode);
        mutex_unlock(&steam->mutex);
 
-       if (steam->connected) {
-               steam_set_lizard_mode(steam, lizard_mode);
+       if (connected)
                steam_input_register(steam);
-       }
 }
 
 static int steam_client_ll_raw_request(struct hid_device *hdev,
index 7710d9f957da5b0dd07ca1444416de7cecd10529..0187c9f8fc22c5567e934cc0cc2089877963c56e 100644 (file)
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
                goto cleanup;
        }
        rc = usb_string(udev, 201, ver_ptr, ver_len);
-       if (ver_ptr == NULL) {
-               rc = -ENOMEM;
-               goto cleanup;
-       }
        if (rc == -EPIPE) {
                *ver_ptr = '\0';
        } else if (rc < 0) {
index 90164fed08d35eca2c34250c8b7cb3814ea99f53..4d1f24ee249c4455a4d5dfe18c7e6b0541311ad4 100644 (file)
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_NO_RUNTIME_PM },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
+       { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
index 6f929bfa9fcd39380f7e9d9fc9729156e28e09f6..d0f1dfe2bcbbd652aa1daa682d2feac611dfa4da 100644 (file)
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
 config SENSORS_W83773G
        tristate "Nuvoton W83773G"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the Nuvoton W83773G hardware
          monitoring chip.
index e4f9f7ce92fabc7c5f10aebaf5d69d350da565d2..f9abeeeead9e966dd8c8df7d225b81788eb67e40 100644 (file)
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
 };
 
 static const u32 ntc_temp_config[] = {
-       HWMON_T_INPUT, HWMON_T_TYPE,
+       HWMON_T_INPUT | HWMON_T_TYPE,
        0
 };
 
index b91a80abf724d087e02cc7aa97e211a1380f6570..4679acb4918e7f65660a8e0e32a7c5f793f03ff0 100644 (file)
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                s++;
                        }
                }
+
+               s = (sensors->power.num_sensors * 4) + 1;
        } else {
                for (i = 0; i < sensors->power.num_sensors; ++i) {
                        s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                                     show_power, NULL, 3, i);
                        attr++;
                }
-       }
 
-       if (sensors->caps.num_sensors >= 1) {
                s = sensors->power.num_sensors + 1;
+       }
 
+       if (sensors->caps.num_sensors >= 1) {
                snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
                attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
                                             0, 0);
index 42fed40198a0fb77981e90236c465c9a12bff218..c0c3043b5d6119adb0b1345c17f4dd6025b37036 100644 (file)
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
        /* Init DMA config if supported */
        ret = i2c_imx_dma_request(i2c_imx, phy_addr);
        if (ret < 0)
-               goto clk_notifier_unregister;
+               goto del_adapter;
 
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
        return 0;   /* Return OK */
 
+del_adapter:
+       i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
        clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:
index 2dc628d4f1aee1b5c07593f85e7d75a6bdb3d0be..1412abcff01095cd001ece69bb80a69d714a80ba 100644 (file)
@@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 {
        struct i3c_dev_boardinfo *boardinfo;
        struct device *dev = &master->dev;
-       struct i3c_device_info info = { };
        enum i3c_addr_slot_status addrstatus;
        u32 init_dyn_addr = 0;
 
@@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 
        boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
 
-       if ((info.pid & GENMASK_ULL(63, 48)) ||
-           I3C_PID_RND_LOWER_32BITS(info.pid))
+       if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+           I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
                return -EINVAL;
 
        boardinfo->init_dyn_addr = init_dyn_addr;
index 59279224e07fcefa460b1939212a5009eb8a3681..10c26ffaa8effe969464c6abc1ef6f198c069744 100644 (file)
@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
 
 static void dw_i3c_master_disable(struct dw_i3c_master *master)
 {
-       writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
+       writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
               master->regs + DEVICE_CTRL);
 }
 
index 7096e577b23f86f5f71aa36beae3e5526ab856d7..50f3ff386bea43f8853c5f5e7426c88c0e3f0851 100644 (file)
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
 
        mutex_lock(&data->mutex);
        ret = kxcjk1013_set_mode(data, OPERATION);
+       if (ret == 0)
+               ret = kxcjk1013_set_range(data, data->range);
        mutex_unlock(&data->mutex);
 
        return ret;
index ff5f2da2e1b134d369fbc6ce7180a9ebf0de4ec1..54d9978b274055da963ed282b6b94c0b4245fed5 100644 (file)
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
        if (sigma_delta->info->has_registers) {
                data[0] = reg << sigma_delta->info->addr_shift;
                data[0] |= sigma_delta->info->read_mask;
+               data[0] |= sigma_delta->comm;
                spi_message_add_tail(&t[0], &m);
        }
        spi_message_add_tail(&t[1], &m);
index 75d2f73582a3d7581e533afd361cdb7af7df46d0..596841a3c4db77f59f5fc7c3c3f0f1fddc21aab7 100644 (file)
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
                ret = wait_event_interruptible_timeout(st->wq_data_avail,
                                                       st->done,
                                                       msecs_to_jiffies(1000));
-               if (ret == 0)
-                       ret = -ETIMEDOUT;
-               if (ret < 0) {
-                       mutex_unlock(&st->lock);
-                       return ret;
-               }
-
-               *val = st->last_value;
 
+               /* Disable interrupts, regardless if adc conversion was
+                * successful or not
+                */
                at91_adc_writel(st, AT91_ADC_CHDR,
                                AT91_ADC_CH(chan->channel));
                at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
-               st->last_value = 0;
-               st->done = false;
+               if (ret > 0) {
+                       /* a valid conversion took place */
+                       *val = st->last_value;
+                       st->last_value = 0;
+                       st->done = false;
+                       ret = IIO_VAL_INT;
+               } else if (ret == 0) {
+                       /* conversion timeout */
+                       dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+                               chan->channel);
+                       ret = -ETIMEDOUT;
+               }
+
                mutex_unlock(&st->lock);
-               return IIO_VAL_INT;
+               return ret;
 
        case IIO_CHAN_INFO_SCALE:
                *val = st->vref_mv;
index b13c61539d46baf3490be318342dac64b6b1dc4b..6401ca7a9a2072e9760144c7b71717d985db6d27 100644 (file)
@@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
 
 err_free_irq:
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 err_clk_disable_unprepare:
        clk_disable_unprepare(xadc->clk);
 err_free_samplerate_trigger:
@@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
                iio_triggered_buffer_cleanup(indio_dev);
        }
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
        clk_disable_unprepare(xadc->clk);
-       cancel_delayed_work(&xadc->zynq_unmask_work);
        kfree(xadc->data);
        kfree(indio_dev->channels);
 
index d5d146e9e372305852e7dc20c5492fc48e5218b1..92c684d2b67ecfd7992327e8703812c5f7cdd94f 100644 (file)
@@ -64,6 +64,7 @@ config IAQCORE
 config PMS7003
        tristate "Plantower PMS7003 particulate matter sensor"
        depends on SERIAL_DEV_BUS
+       select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Plantower PMS7003 particulate
          matter sensor.
@@ -71,6 +72,19 @@ config PMS7003
          To compile this driver as a module, choose M here: the module will
          be called pms7003.
 
+config SENSIRION_SGP30
+       tristate "Sensirion SGPxx gas sensors"
+       depends on I2C
+       select CRC8
+       help
+         Say Y here to build I2C interface support for the following
+         Sensirion SGP gas sensors:
+           * SGP30 gas sensor
+           * SGPC3 low power gas sensor
+
+         To compile this driver as module, choose M here: the
+         module will be called sgp30.
+
 config SPS30
        tristate "SPS30 particulate matter sensor"
        depends on I2C
index 0ae89b87e2d6451fbe177e0978b2179ab4c4487b..4edc5d21cb9fa63739d70fee23976832f1cf9313 100644 (file)
@@ -2,11 +2,9 @@
 #ifndef BME680_H_
 #define BME680_H_
 
-#define BME680_REG_CHIP_I2C_ID                 0xD0
-#define BME680_REG_CHIP_SPI_ID                 0x50
+#define BME680_REG_CHIP_ID                     0xD0
 #define   BME680_CHIP_ID_VAL                   0x61
-#define BME680_REG_SOFT_RESET_I2C              0xE0
-#define BME680_REG_SOFT_RESET_SPI              0x60
+#define BME680_REG_SOFT_RESET                  0xE0
 #define   BME680_CMD_SOFTRESET                 0xB6
 #define BME680_REG_STATUS                      0x73
 #define   BME680_SPI_MEM_PAGE_BIT              BIT(4)
index 70c1fe4366f4c6a17100b469452f8903281e665e..ccde4c65ff9340b2bcc4a730dc978daf918fa08c 100644 (file)
@@ -63,9 +63,23 @@ struct bme680_data {
        s32 t_fine;
 };
 
+static const struct regmap_range bme680_volatile_ranges[] = {
+       regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+       regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+       regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+       .yes_ranges     = bme680_volatile_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(bme680_volatile_ranges),
+};
+
 const struct regmap_config bme680_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0xef,
+       .volatile_table = &bme680_volatile_table,
+       .cache_type = REGCACHE_RBTREE,
 };
 EXPORT_SYMBOL(bme680_regmap_config);
 
@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
        s64 var1, var2, var3;
        s16 calc_temp;
 
+       /* If the calibration is invalid, attempt to reload it */
+       if (!calib->par_t2)
+               bme680_read_calib(data, calib);
+
        var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
        var2 = (var1 * calib->par_t2) >> 11;
        var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
        return ret;
 }
 
-static int bme680_read_temp(struct bme680_data *data,
-                           int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
 {
        struct device *dev = regmap_get_device(data->regmap);
        int ret;
@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
         * compensate_press/compensate_humid to get compensated
         * pressure/humidity readings.
         */
-       if (val && val2) {
-               *val = comp_temp;
-               *val2 = 100;
-               return IIO_VAL_FRACTIONAL;
+       if (val) {
+               *val = comp_temp * 10; /* Centidegrees to millidegrees */
+               return IIO_VAL_INT;
        }
 
        return ret;
@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
        s32 adc_press;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
        u32 comp_humidity;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_PROCESSED:
                switch (chan->type) {
                case IIO_TEMP:
-                       return bme680_read_temp(data, val, val2);
+                       return bme680_read_temp(data, val);
                case IIO_PRESSURE:
                        return bme680_read_press(data, val, val2);
                case IIO_HUMIDITYRELATIVE:
@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
 {
        struct iio_dev *indio_dev;
        struct bme680_data *data;
+       unsigned int val;
        int ret;
 
+       ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+                          BME680_CMD_SOFTRESET);
+       if (ret < 0) {
+               dev_err(dev, "Failed to reset chip\n");
+               return ret;
+       }
+
+       ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+       if (ret < 0) {
+               dev_err(dev, "Error reading chip ID\n");
+               return ret;
+       }
+
+       if (val != BME680_CHIP_ID_VAL) {
+               dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+                               val, BME680_CHIP_ID_VAL);
+               return -ENODEV;
+       }
+
        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
index b2f805b6b36a4904fea4183aa66cce10304b6a67..de9c9e3d23ea347824f0a480031bf46e11021c6a 100644 (file)
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
 {
        struct regmap *regmap;
        const char *name = NULL;
-       unsigned int val;
-       int ret;
 
        regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&client->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
-       if (ret < 0) {
-               dev_err(&client->dev, "Error reading I2C chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-
        if (id)
                name = id->name;
 
index d0b7bdd3f066021938f436b3398a5c5532884a35..3b838068a7e48d7378597aeec7eef7da5953cd58 100644 (file)
 
 #include "bme680.h"
 
+struct bme680_spi_bus_context {
+       struct spi_device *spi;
+       u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits, a "page" register determines
+ * which part of the 8-bit range is active. This function looks at the address
+ * and writes the page selection bit if needed
+ */
+static int bme680_regmap_spi_select_page(
+       struct bme680_spi_bus_context *ctx, u8 reg)
+{
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 buf[2];
+       u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+       if (page == ctx->current_page)
+               return 0;
+
+       /*
+        * Data sheet claims we're only allowed to change bit 4, so we must do
+        * a read-modify-write on each and every page select
+        */
+       buf[0] = BME680_REG_STATUS;
+       ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       buf[0] = BME680_REG_STATUS;
+       if (page)
+               buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+       else
+               buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+       ret = spi_write(spi, buf, 2);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       ctx->current_page = page;
+
+       return 0;
+}
+
 static int bme680_regmap_spi_write(void *context, const void *data,
                                   size_t count)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
        u8 buf[2];
 
        memcpy(buf, data, 2);
+
+       ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+       if (ret)
+               return ret;
+
        /*
         * The SPI register address (= full register address without bit 7)
         * and the write command (bit7 = RW = '0')
         */
        buf[0] &= ~0x80;
 
-       return spi_write_then_read(spi, buf, 2, NULL, 0);
+       return spi_write(spi, buf, 2);
 }
 
 static int bme680_regmap_spi_read(void *context, const void *reg,
                                  size_t reg_size, void *val, size_t val_size)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 addr = *(const u8 *)reg;
+
+       ret = bme680_regmap_spi_select_page(ctx, addr);
+       if (ret)
+               return ret;
 
-       return spi_write_then_read(spi, reg, reg_size, val, val_size);
+       addr |= 0x80; /* bit7 = RW = '1' */
+
+       return spi_write_then_read(spi, &addr, 1, val, val_size);
 }
 
 static struct regmap_bus bme680_regmap_bus = {
@@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
 static int bme680_spi_probe(struct spi_device *spi)
 {
        const struct spi_device_id *id = spi_get_device_id(spi);
+       struct bme680_spi_bus_context *bus_context;
        struct regmap *regmap;
-       unsigned int val;
        int ret;
 
        spi->bits_per_word = 8;
@@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
                return ret;
        }
 
+       bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+       if (!bus_context)
+               return -ENOMEM;
+
+       bus_context->spi = spi;
+       bus_context->current_page = 0xff; /* Undefined on warm boot */
+
        regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
-                                 &spi->dev, &bme680_regmap_config);
+                                 bus_context, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&spi->dev, "Failed to register spi regmap %d\n",
                                (int)PTR_ERR(regmap));
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
-       ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Error reading SPI chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-       /*
-        * select Page 1 of spi_mem_page to enable access to
-        * to registers from address 0x00 to 0x7F.
-        */
-       ret = regmap_write_bits(regmap, BME680_REG_STATUS,
-                               BME680_SPI_MEM_PAGE_BIT,
-                               BME680_SPI_MEM_PAGE_1_VAL);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
-               return ret;
-       }
-
        return bme680_core_probe(&spi->dev, regmap, id->name);
 }
 
index 89cb0066a6e0839f49fd68fb2395e8425b174c90..8d76afb87d87c58322b3ee8835ea31f5edc5a834 100644 (file)
@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
                         * Do not use IIO_DEGREE_TO_RAD to avoid precision
                         * loss. Round to the nearest integer.
                         */
-                       *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
-                       *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
-                       ret = IIO_VAL_FRACTIONAL;
+                       *val = 0;
+                       *val2 = div_s64(val64 * 3141592653ULL,
+                                       180 << (CROS_EC_SENSOR_BITS - 1));
+                       ret = IIO_VAL_INT_PLUS_NANO;
                        break;
                case MOTIONSENSE_TYPE_MAG:
                        /*
index 6d71fd905e29d69b0a5b1afa99c5451037333153..c701a45469f6436746b8c7e7b2da0680c45829ec 100644 (file)
@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
 
        inoutbuf[0] = 0x60; /* write EEPROM */
        inoutbuf[0] |= data->ref_mode << 3;
+       inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
        inoutbuf[1] = data->dac_value >> 4;
        inoutbuf[2] = (data->dac_value & 0xf) << 4;
 
index 63ca31628a93af8454f5fce9ced15d752f28f16d..92c07ab826eb32c9d4665728159dbd4358cefc80 100644 (file)
@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
                return bmg160_get_filter(data, val);
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
                switch (chan->type) {
                case IIO_TEMP:
-                       *val2 = 500000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 500;
+                       return IIO_VAL_INT;
                case IIO_ANGL_VEL:
                {
                        int i;
@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
                        for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
                                if (bmg160_scale_table[i].dps_range ==
                                                        data->dps_range) {
+                                       *val = 0;
                                        *val2 = bmg160_scale_table[i].scale;
                                        return IIO_VAL_INT_PLUS_MICRO;
                                }
index 77fac81a3adce2245fe0bf499b60a382c08af98b..5ddebede31a6f6f3625263893ad27a0f3fb68b88 100644 (file)
@@ -29,7 +29,8 @@
 
 #include "mpu3050.h"
 
-#define MPU3050_CHIP_ID                0x69
+#define MPU3050_CHIP_ID                0x68
+#define MPU3050_CHIP_ID_MASK   0x7E
 
 /*
  * Register map: anything suffixed *_H is a big-endian high byte and always
@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
                goto err_power_down;
        }
 
-       if (val != MPU3050_CHIP_ID) {
-               dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+       if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+               dev_err(dev, "unsupported chip id %02x\n",
+                               (u8)(val & MPU3050_CHIP_ID_MASK));
                ret = -ENODEV;
                goto err_power_down;
        }
index cd5bfe39591bb2b2d44b3848cc2d84ef2d4a38f2..dadd921a4a30fdb527faf9e0b8e359ba6fa61bc0 100644 (file)
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
        const unsigned long *mask;
        unsigned long *trialmask;
 
-       trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
-                                 sizeof(*trialmask),
-                                 GFP_KERNEL);
+       trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                           sizeof(*trialmask), GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
index 4700fd5d8c90a6ebaee08659b4bb17ca9db105a4..9c4d92115504ae093b8990ffdd4bb649f2796228 100644 (file)
@@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-       mutex_lock(&indio_dev->info_exist_lock);
-
        cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
+       mutex_lock(&indio_dev->info_exist_lock);
+
        iio_device_unregister_debugfs(indio_dev);
 
        iio_disable_all_buffers(indio_dev);
index 70b7d80431a9b935b9a7ffa6fa50be6601f9c4a0..f2e7ffe6fc546612f62da9cde853b9c1bf37d8bb 100644 (file)
@@ -993,6 +993,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                 * will only be one mm, so no big deal.
                 */
                down_write(&mm->mmap_sem);
+               if (!mmget_still_valid(mm))
+                       goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
@@ -1007,6 +1009,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                        vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                }
                mutex_unlock(&ufile->umap_lock);
+       skip_mm:
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
index 612f04190ed8386e51ab5f8321464320140c1e77..9784c6c0d2ecfbbca031871f54fcc415602029fc 100644 (file)
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
        int total_contexts;
        int ret;
        unsigned ngroups;
-       int qos_rmt_count;
+       int rmt_count;
        int user_rmt_reduced;
        u32 n_usr_ctxts;
        u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
                n_usr_ctxts = rcv_contexts - total_contexts;
        }
 
-       /* each user context requires an entry in the RMT */
-       qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-       if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-               user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+       /*
+        * The RMT entries are currently allocated as shown below:
+        * 1. QOS (0 to 128 entries);
+        * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+        * 3. VNIC (num_vnic_contexts).
+        * It should be noted that PSM FECN oversubscribe num_vnic_contexts
+        * entries of RMT because both VNIC and PSM could allocate any receive
+        * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
+        * and PSM FECN must reserve an RMT entry for each possible PSM receive
+        * context.
+        */
+       rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+       if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+               user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
                dd_dev_err(dd,
                           "RMT size is reducing the number of user receive contexts from %u to %d\n",
                           n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        u64 reg;
        int i, idx, regoff, regidx;
        u8 offset;
+       u32 total_cnt;
 
        /* there needs to be enough room in the map table */
-       if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+       total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+       if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                return;
        }
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        /* add rule 1 */
        add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-       rmt->used += dd->num_user_contexts;
+       rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
index 9b643c2409cf8bee5c28084b104c2839bc5768aa..eba300330a027acdae1b97c92af5ef07ece6b605 100644 (file)
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
                if (!list_empty(&priv->s_iowait.list) &&
                    !(qp->s_flags & RVT_S_BUSY) &&
                    !(priv->s_flags & RVT_S_BUSY)) {
-                       qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+                       qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
                        list_del_init(&priv->s_iowait.list);
                        priv->s_iowait.lock = NULL;
                        rvt_put_qp(qp);
index e6726c1ab8669a66722835b43d8b6b3481a11754..5991211d72bdd84d307ab4ebc245a011899eb4bc 100644 (file)
@@ -3088,7 +3088,7 @@ send_last:
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
@@ -3166,7 +3166,7 @@ send_last:
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
index fdda33aca77f2031ea2357435c029de0508ffdec..43cbce7a19ea43f42af2464a782221da2ee386bf 100644 (file)
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
            make_tid_rdma_ack(qp, ohdr, ps))
                return 1;
 
-       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
-               if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == READ_ONCE(qp->s_head))
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (iowait_sdma_pending(&priv->s_iowait)) {
-                       qp->s_flags |= RVT_S_WAIT_DMA;
-                       goto bail;
-               }
-               clear_ahg(qp);
-               wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-               hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                                        IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
-               /* will get called again */
-               goto done_free_tx;
-       }
+       /*
+        * Bail out if we can't send data.
+        * Be reminded that this check must been done after the call to
+        * make_tid_rdma_ack() because the responding QP could be in
+        * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+        */
+       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+               goto bail;
 
        if (priv->s_flags & RVT_S_WAIT_ACK)
                goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
                             middle, ps);
        return 1;
-done_free_tx:
-       hfi1_put_txreq(ps->s_txreq);
-       ps->s_txreq = NULL;
-       return 1;
-
 bail:
        hfi1_put_txreq(ps->s_txreq);
 bail_no_tx:
index f1fec56f3ff49047d7ade725d13b43d3dba8bbb6..8e29dbb5b5fbc3bd883384915e76155201572848 100644 (file)
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
                dma_offset = offset = idx_offset * table->obj_size;
        } else {
+               u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
                hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
                /* mtt mhop */
                i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                        hem_idx = i;
 
                hem = table->hem[hem_idx];
-               dma_offset = offset = (obj & (table->num_obj - 1)) *
-                                      table->obj_size % mhop.bt_chunk_size;
+               dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+                                      mhop.bt_chunk_size;
                if (mhop.hop_num == 2)
                        dma_offset = offset = 0;
        }
index b09f1cde2ff54ca9522a60d21ba3e82967938574..08be0e4eabcd764e9af0a666cec02fad1e921f76 100644 (file)
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_table *table;
        dma_addr_t dma_handle;
        __le64 *mtts;
-       u32 s = start_index * sizeof(u64);
        u32 bt_page_size;
        u32 i;
 
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                return -EINVAL;
 
        mtts = hns_roce_table_find(hr_dev, table,
-                               mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+                               mtt->first_seg +
+                               start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
                                &dma_handle);
        if (!mtts)
                return -ENOMEM;
index 57c76eafef2f8a896ff336af5bfa10954c99f1e4..66cdf625534ff8901a6efdf90295eaee3cc0145f 100644 (file)
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
        wait_for_completion(&hr_qp->free);
 
        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
-               if (hr_dev->caps.sccc_entry_sz)
-                       hns_roce_table_put(hr_dev, &qp_table->sccc_table,
-                                          hr_qp->qpn);
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
index c20bfc41ecf18602cd0f289941d49dd6d37390da..0aa10ebda5d9af2f60f5d98807a2de6ec307ad04 100644 (file)
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
        bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
-       u64 access_mask = ODP_READ_ALLOWED_BIT;
+       u64 access_mask;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
        size_t size;
@@ -607,6 +607,7 @@ next_mr:
        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+       access_mask = ODP_READ_ALLOWED_BIT;
 
        if (prefetch && !downgrade && !mr->umem->writable) {
                /* prefetch with write-access must
index 6d8b3e0de57a8e0d3e8071d9aebd0707618cebb4..ec41400fec0c01aa4d2894b7652701a90a190fff 100644 (file)
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
        pvrdma_free_slots(dev);
+       dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+                         dev->dsrbase);
 
        iounmap(dev->regs);
        kfree(dev->sgid_tbl);
index effb63205d3d7783e8e4e598407332892ef6aae2..4c67cf30a5d9ab14bff5f5c53d289ba347d241f4 100644 (file)
@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
+       pdata->input = input;
+       platform_set_drvdata(pdev, pdata);
+
        error = devm_request_irq(&pdev->dev, pdata->irq,
                               imx_snvs_pwrkey_interrupt,
                               0, pdev->name, pdev);
@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
-       pdata->input = input;
-       platform_set_drvdata(pdev, pdata);
-
        device_init_wakeup(&pdev->dev, pdata->wakeup);
 
        return 0;
index 628ef617bb2f7f51301d5b422905140fa53b1c1a..f9525d6f0bfe810c9ab1c2bd0a2a971f2e9695b4 100644 (file)
@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0600", 0 },
        { "ELAN0601", 0 },
        { "ELAN0602", 0 },
+       { "ELAN0603", 0 },
+       { "ELAN0604", 0 },
        { "ELAN0605", 0 },
+       { "ELAN0606", 0 },
+       { "ELAN0607", 0 },
        { "ELAN0608", 0 },
        { "ELAN0609", 0 },
        { "ELAN060B", 0 },
        { "ELAN060C", 0 },
+       { "ELAN060F", 0 },
+       { "ELAN0610", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0615", 0 },
+       { "ELAN0616", 0 },
        { "ELAN0617", 0 },
        { "ELAN0618", 0 },
+       { "ELAN0619", 0 },
+       { "ELAN061A", 0 },
+       { "ELAN061B", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN061E", 0 },
+       { "ELAN061F", 0 },
        { "ELAN0620", 0 },
        { "ELAN0621", 0 },
        { "ELAN0622", 0 },
+       { "ELAN0623", 0 },
+       { "ELAN0624", 0 },
+       { "ELAN0625", 0 },
+       { "ELAN0626", 0 },
+       { "ELAN0627", 0 },
+       { "ELAN0628", 0 },
+       { "ELAN0629", 0 },
+       { "ELAN062A", 0 },
+       { "ELAN062B", 0 },
+       { "ELAN062C", 0 },
+       { "ELAN062D", 0 },
+       { "ELAN0631", 0 },
+       { "ELAN0632", 0 },
        { "ELAN1000", 0 },
        { }
 };
index 1b1378619fc9ec2f0caa0bbbd262192c21de61e4..ff40ba758cf365e89ddeb2270971e1536554b817 100644 (file)
@@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
        u64 start = iommu->exclusion_start & PAGE_MASK;
-       u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+       u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;
 
        if (!iommu->exclusion_start)
index 86b72fbd3b45dd63f495de36435410997740f473..353111a104133a0faeadf5d46e3e2f1bbf617d27 100644 (file)
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
                                             NULL);
        if (!priv->domain) {
                pr_err("ls1x-irq: cannot add IRQ domain\n");
+               err = -ENOMEM;
                goto out_iounmap;
        }
 
index 4ab8b1b6608f7136365f91d713f65647a8271296..a14e35d405387d4dc43bf672139c773bf4b05d2f 100644 (file)
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (!maddr || maddr->family != AF_ISDN)
+       if (addr_len < sizeof(struct sockaddr_mISDN))
                return -EINVAL;
 
-       if (addr_len < sizeof(struct sockaddr_mISDN))
+       if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
        lock_sock(sk);
index 3789185144dae34241911d3c42308e6cbad650f2..0b7d5fb4548dcd8720f86e98a2567e3d387a061d 100644 (file)
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
        struct pblk_sec_meta *meta;
        struct bio *new_bio = rqd->bio;
        struct bio *bio = pr_ctx->orig_bio;
-       struct bio_vec src_bv, dst_bv;
        void *meta_list = rqd->meta_list;
-       int bio_init_idx = pr_ctx->bio_init_idx;
        unsigned long *read_bitmap = pr_ctx->bitmap;
+       struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+       struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        void *src_p, *dst_p;
-       int hole, i;
+       int bit, i;
 
        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
        /* Fill the holes in the original bio */
        i = 0;
-       hole = find_first_zero_bit(read_bitmap, nr_secs);
-       do {
-               struct pblk_line *line;
+       for (bit = 0; bit < nr_secs; bit++) {
+               if (!test_bit(bit, read_bitmap)) {
+                       struct bio_vec dst_bv, src_bv;
+                       struct pblk_line *line;
 
-               line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-               kref_put(&line->ref, pblk_line_put);
+                       line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+                       kref_put(&line->ref, pblk_line_put);
 
-               meta = pblk_get_meta(pblk, meta_list, hole);
-               meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+                       meta = pblk_get_meta(pblk, meta_list, bit);
+                       meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-               src_bv = new_bio->bi_io_vec[i++];
-               dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+                       dst_bv = bio_iter_iovec(bio, orig_iter);
+                       src_bv = bio_iter_iovec(new_bio, new_iter);
 
-               src_p = kmap_atomic(src_bv.bv_page);
-               dst_p = kmap_atomic(dst_bv.bv_page);
+                       src_p = kmap_atomic(src_bv.bv_page);
+                       dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_bv.bv_offset,
-                       src_p + src_bv.bv_offset,
-                       PBLK_EXPOSED_PAGE_SIZE);
+                       memcpy(dst_p + dst_bv.bv_offset,
+                               src_p + src_bv.bv_offset,
+                               PBLK_EXPOSED_PAGE_SIZE);
 
-               kunmap_atomic(src_p);
-               kunmap_atomic(dst_p);
+                       kunmap_atomic(src_p);
+                       kunmap_atomic(dst_p);
 
-               mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+                       flush_dcache_page(dst_bv.bv_page);
+                       mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-               hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-       } while (hole < nr_secs);
+                       bio_advance_iter(new_bio, &new_iter,
+                                       PBLK_EXPOSED_PAGE_SIZE);
+                       i++;
+               }
+               bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+       }
 
        bio_put(new_bio);
        kfree(pr_ctx);
index 95c6d86ab5e8deaa0d5975240f708fe165061e6e..c4ef1fceead6ee1ba83bfb0e54f2eefae26bd48b 100644 (file)
@@ -115,6 +115,7 @@ struct mapped_device {
        struct srcu_struct io_barrier;
 };
 
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
index b53f30f16b4d4f2c02bf9c15e5b801234b8cd9ae..4b76f84424c3c1a73ef3bc3b9605a1486e3bf88b 100644 (file)
@@ -36,7 +36,7 @@ struct dm_device {
        struct list_head list;
 };
 
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
        "crypt",
        "delay",
        "linear",
index d57d997a52c81cfe6c68918520316f993aeebc44..7c678f50aaa37a5612ea23bed69e0fb31526224e 100644 (file)
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 {
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
-              range2->logical_sector + range2->n_sectors > range2->logical_sector;
+              range1->logical_sector + range1->n_sectors > range2->logical_sector;
 }
 
 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
-               if (!ranges_overlap(range, last_range))
-                       break;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        journal_watermark = val;
                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
                        sync_msec = val;
-               else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+               else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
                        if (ic->meta_dev) {
                                dm_put_device(ti, ic->meta_dev);
                                ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                goto bad;
                        }
                        ic->sectors_per_block = val >> SECTOR_SHIFT;
-               } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+               } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
                                            "Invalid internal_hash argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+               } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
                                            "Invalid journal_crypt argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+               } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
                                            "Invalid journal_mac argument");
                        if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
        .io_hints               = dm_integrity_io_hints,
 };
 
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
 {
        int r;
 
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
        return r;
 }
 
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
 {
        dm_unregister_target(&integrity_target);
        kmem_cache_destroy(journal_io_cache);
index 09773636602d3d86728b127ddb67b2d674b67cae..b66745bd08bbcc2dd1ab349f47c7326199518778 100644 (file)
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
        }
 
        if (unlikely(error == BLK_STS_TARGET)) {
-               if (req_op(clone) == REQ_OP_WRITE_SAME &&
-                   !clone->q->limits.max_write_same_sectors)
+               if (req_op(clone) == REQ_OP_DISCARD &&
+                   !clone->q->limits.max_discard_sectors)
+                       disable_discard(tio->md);
+               else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                        !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
-               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-                   !clone->q->limits.max_write_zeroes_sectors)
+               else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                        !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }
 
index ba9481f1bf3c04cf64c7ea5e570f2a2bf533759c..cde3b49b2a9107abafd76d190c9fc61209141f7b 100644 (file)
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
        return true;
 }
 
+static int device_requires_stable_pages(struct dm_target *ti,
+                                       struct dm_dev *dev, sector_t start,
+                                       sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well.  Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i;
+
+       for (i = 0; i < dm_table_get_num_targets(t); i++) {
+               ti = dm_table_get_target(t, i);
+
+               if (ti->type->iterate_devices &&
+                   ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+                       return true;
+       }
+
+       return false;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
 {
@@ -1896,6 +1926,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        dm_table_verify_integrity(t);
 
+       /*
+        * Some devices don't use blk_integrity but still want stable pages
+        * because they do their own checksumming.
+        */
+       if (dm_table_requires_stable_pages(t))
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+       else
+               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
        /*
         * Determine whether or not this queue's I/O timings contribute
         * to the entropy pool, Only request-based targets use this.
index 68d24056d0b1c17d7fa0c271d1d5582d7eb72c89..043f0761e4a0aea8a22a1c6745f3f9bbbc021dfd 100644 (file)
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
        }
 }
 
+void disable_discard(struct mapped_device *md)
+{
+       struct queue_limits *limits = dm_get_queue_limits(md);
+
+       /* device doesn't really support DISCARD, disable it */
+       limits->max_discard_sectors = 0;
+       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
        dm_endio_fn endio = tio->ti->type->end_io;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-               if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                   !bio->bi_disk->queue->limits.max_write_same_sectors)
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bio->bi_disk->queue->limits.max_discard_sectors)
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                        !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
-               if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                   !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }
 
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
                return -EINVAL;
        }
 
-       /*
-        * BIO based queue uses its own splitting. When multipage bvecs
-        * is switched on, size of the incoming bio may be too big to
-        * be handled in some targets, such as crypt.
-        *
-        * When these targets are ready for the big bio, we can remove
-        * the limit.
-        */
-       ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+       ti->max_io_len = (uint32_t) len;
 
        return 0;
 }
index 0ce2d8dfc5f1a19bedbae37f91ce956b1b13da89..26ad6468d13a786552f581e3f45eb444febf0b51 100644 (file)
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
 
 config MFD_SUN6I_PRCM
        bool "Allwinner A31 PRCM controller"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
        select MFD_CORE
        help
          Support for the PRCM (Power/Reset/Clock Management) unit available
index 69df27769c2136e817d3baf575269c59902752ac..43ac71691fe477f95eba6293cc6f6b2df810e243 100644 (file)
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
 static const struct mfd_cell sprd_pmic_devs[] = {
        {
                .name = "sc27xx-wdt",
-               .of_compatible = "sprd,sc27xx-wdt",
+               .of_compatible = "sprd,sc2731-wdt",
        }, {
                .name = "sc27xx-rtc",
-               .of_compatible = "sprd,sc27xx-rtc",
+               .of_compatible = "sprd,sc2731-rtc",
        }, {
                .name = "sc27xx-charger",
-               .of_compatible = "sprd,sc27xx-charger",
+               .of_compatible = "sprd,sc2731-charger",
        }, {
                .name = "sc27xx-chg-timer",
-               .of_compatible = "sprd,sc27xx-chg-timer",
+               .of_compatible = "sprd,sc2731-chg-timer",
        }, {
                .name = "sc27xx-fast-chg",
-               .of_compatible = "sprd,sc27xx-fast-chg",
+               .of_compatible = "sprd,sc2731-fast-chg",
        }, {
                .name = "sc27xx-chg-wdt",
-               .of_compatible = "sprd,sc27xx-chg-wdt",
+               .of_compatible = "sprd,sc2731-chg-wdt",
        }, {
                .name = "sc27xx-typec",
-               .of_compatible = "sprd,sc27xx-typec",
+               .of_compatible = "sprd,sc2731-typec",
        }, {
                .name = "sc27xx-flash",
-               .of_compatible = "sprd,sc27xx-flash",
+               .of_compatible = "sprd,sc2731-flash",
        }, {
                .name = "sc27xx-eic",
-               .of_compatible = "sprd,sc27xx-eic",
+               .of_compatible = "sprd,sc2731-eic",
        }, {
                .name = "sc27xx-efuse",
-               .of_compatible = "sprd,sc27xx-efuse",
+               .of_compatible = "sprd,sc2731-efuse",
        }, {
                .name = "sc27xx-thermal",
-               .of_compatible = "sprd,sc27xx-thermal",
+               .of_compatible = "sprd,sc2731-thermal",
        }, {
                .name = "sc27xx-adc",
-               .of_compatible = "sprd,sc27xx-adc",
+               .of_compatible = "sprd,sc2731-adc",
        }, {
                .name = "sc27xx-audio-codec",
-               .of_compatible = "sprd,sc27xx-audio-codec",
+               .of_compatible = "sprd,sc2731-audio-codec",
        }, {
                .name = "sc27xx-regulator",
-               .of_compatible = "sprd,sc27xx-regulator",
+               .of_compatible = "sprd,sc2731-regulator",
        }, {
                .name = "sc27xx-vibrator",
-               .of_compatible = "sprd,sc27xx-vibrator",
+               .of_compatible = "sprd,sc2731-vibrator",
        }, {
                .name = "sc27xx-keypad-led",
-               .of_compatible = "sprd,sc27xx-keypad-led",
+               .of_compatible = "sprd,sc2731-keypad-led",
        }, {
                .name = "sc27xx-bltc",
-               .of_compatible = "sprd,sc27xx-bltc",
+               .of_compatible = "sprd,sc2731-bltc",
        }, {
                .name = "sc27xx-fgu",
-               .of_compatible = "sprd,sc27xx-fgu",
+               .of_compatible = "sprd,sc2731-fgu",
        }, {
                .name = "sc27xx-7sreset",
-               .of_compatible = "sprd,sc27xx-7sreset",
+               .of_compatible = "sprd,sc2731-7sreset",
        }, {
                .name = "sc27xx-poweroff",
-               .of_compatible = "sprd,sc27xx-poweroff",
+               .of_compatible = "sprd,sc2731-poweroff",
        }, {
                .name = "sc27xx-syscon",
-               .of_compatible = "sprd,sc27xx-syscon",
+               .of_compatible = "sprd,sc2731-syscon",
        },
 };
 
index 299016bc46d909b4164708044ae21c9d5d5d14f4..104477b512a296b56e7549f001b63cd1ad9a43ac 100644 (file)
@@ -1245,6 +1245,28 @@ free:
        return status;
 }
 
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               disable_irq(client->irq);
+
+       return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               enable_irq(client->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
 static const struct i2c_device_id twl_ids[] = {
        { "twl4030", TWL4030_VAUX2 },   /* "Triton 2" */
        { "twl5030", 0 },               /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
 /* One Client Driver , 4 Clients */
 static struct i2c_driver twl_driver = {
        .driver.name    = DRIVER_NAME,
+       .driver.pm      = &twl_dev_pm_ops,
        .id_table       = twl_ids,
        .probe          = twl_probe,
        .remove         = twl_remove,
index 42ab8ec92a0464ab1806a088983fc863b25f4cb4..3209ee020b153a4a539b7f4a84e60e5bf75df861 100644 (file)
@@ -496,6 +496,14 @@ config VEXPRESS_SYSCFG
          bus. System Configuration interface is one of the possible means
          of generating transactions on this bus.
 
+config ASPEED_P2A_CTRL
+       depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+       tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+       help
+         Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
+         ioctl()s, the driver also provides an interface for userspace mappings to
+         a pre-defined region.
+
 config ASPEED_LPC_CTRL
        depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
        tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
index d5b7d3404dc78a515c597711b358b51912bd3e77..c36239573a5cab4167b1ece45e3de2972142a019 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)         += cxl/
 obj-$(CONFIG_ASPEED_LPC_CTRL)  += aspeed-lpc-ctrl.o
 obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_P2A_CTRL)  += aspeed-p2a-ctrl.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)        += pci_endpoint_test.o
 obj-$(CONFIG_OCXL)             += ocxl/
 obj-y                          += cardreader/
diff --git a/drivers/misc/aspeed-p2a-ctrl.c b/drivers/misc/aspeed-p2a-ctrl.c
new file mode 100644 (file)
index 0000000..c0521b2
--- /dev/null
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/aspeed-p2a-ctrl.h>
+
+#define DEVICE_NAME    "aspeed-p2a-ctrl"
+
+/* SCU2C is a Misc. Control Register. */
+#define SCU2C 0x2c
+/* SCU180 is the PCIe Configuration Setting Control Register. */
+#define SCU180 0x180
+/* Bit 1 controls the P2A bridge, while bit 0 controls the entire VGA device
+ * on the PCI bus.
+ */
+#define SCU180_ENP2A BIT(1)
+
+/* The ast2400/2500 both have six ranges. */
+#define P2A_REGION_COUNT 6
+
+struct region {
+       u64 min;
+       u64 max;
+       u32 bit;
+};
+
+struct aspeed_p2a_model_data {
+       /* min, max, bit */
+       struct region regions[P2A_REGION_COUNT];
+};
+
+struct aspeed_p2a_ctrl {
+       struct miscdevice miscdev;
+       struct regmap *regmap;
+
+       const struct aspeed_p2a_model_data *config;
+
+       /* Access to these needs to be locked, held via probe, mapping ioctl,
+        * and release, remove.
+        */
+       struct mutex tracking;
+       u32 readers;
+       u32 readerwriters[P2A_REGION_COUNT];
+
+       phys_addr_t mem_base;
+       resource_size_t mem_size;
+};
+
+struct aspeed_p2a_user {
+       struct file *file;
+       struct aspeed_p2a_ctrl *parent;
+
+       /* The entire memory space is opened for reading once the bridge is
+        * enabled, therefore this needs only to be tracked once per user.
+        * If any user has it open for read, the bridge must stay enabled.
+        */
+       u32 read;
+
+       /* Each entry of the array corresponds to a P2A Region.  If the user
+        * opens for read or readwrite, the reference goes up here.  On
+        * release, this array is walked and references adjusted accordingly.
+        */
+       u32 readwrite[P2A_REGION_COUNT];
+};
+
+static void aspeed_p2a_enable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+       regmap_update_bits(p2a_ctrl->regmap,
+               SCU180, SCU180_ENP2A, SCU180_ENP2A);
+}
+
+static void aspeed_p2a_disable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+       regmap_update_bits(p2a_ctrl->regmap, SCU180, SCU180_ENP2A, 0);
+}
+
+static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct aspeed_p2a_user *priv = file->private_data;
+       struct aspeed_p2a_ctrl *ctrl = priv->parent;
+
+       if (ctrl->mem_base == 0 && ctrl->mem_size == 0)
+               return -EINVAL;
+
+       unsigned long vsize = vma->vm_end - vma->vm_start;
+       pgprot_t prot = vma->vm_page_prot;
+
+       if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
+               return -EINVAL;
+
+       /* ast2400/2500 AHB accesses are not cache coherent */
+       prot = pgprot_noncached(prot);
+
+       if (remap_pfn_range(vma, vma->vm_start,
+               (ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
+               vsize, prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+static bool aspeed_p2a_region_acquire(struct aspeed_p2a_user *priv,
+               struct aspeed_p2a_ctrl *ctrl,
+               struct aspeed_p2a_ctrl_mapping *map)
+{
+       int i;
+       u64 base, end;
+       bool matched = false;
+
+       base = map->addr;
+       end = map->addr + (map->length - 1);
+
+       /* If the value is a legal u32, it will find a match. */
+       for (i = 0; i < P2A_REGION_COUNT; i++) {
+               const struct region *curr = &ctrl->config->regions[i];
+
+               /* If the top of this region is lower than your base, skip it.
+                */
+               if (curr->max < base)
+                       continue;
+
+               /* If the bottom of this region is higher than your end, bail.
+                */
+               if (curr->min > end)
+                       break;
+
+               /* Lock this and update it, therefore it someone else is
+                * closing their file out, this'll preserve the increment.
+                */
+               mutex_lock(&ctrl->tracking);
+               ctrl->readerwriters[i] += 1;
+               mutex_unlock(&ctrl->tracking);
+
+               /* Track with the user, so when they close their file, we can
+                * decrement properly.
+                */
+               priv->readwrite[i] += 1;
+
+               /* Enable the region as read-write. */
+               regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
+               matched = true;
+       }
+
+       return matched;
+}
+
+static long aspeed_p2a_ioctl(struct file *file, unsigned int cmd,
+               unsigned long data)
+{
+       struct aspeed_p2a_user *priv = file->private_data;
+       struct aspeed_p2a_ctrl *ctrl = priv->parent;
+       void __user *arg = (void __user *)data;
+       struct aspeed_p2a_ctrl_mapping map;
+
+       if (copy_from_user(&map, arg, sizeof(map)))
+               return -EFAULT;
+
+       switch (cmd) {
+       case ASPEED_P2A_CTRL_IOCTL_SET_WINDOW:
+               /* If they want a region to be read-only, since the entire
+                * region is read-only once enabled, we just need to track this
+                * user wants to read from the bridge, and if it's not enabled.
+                * Enable it.
+                */
+               if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
+                       mutex_lock(&ctrl->tracking);
+                       ctrl->readers += 1;
+                       mutex_unlock(&ctrl->tracking);
+
+                       /* Track with the user, so when they close their file,
+                        * we can decrement properly.
+                        */
+                       priv->read += 1;
+               } else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
+                       /* If we don't acquire any region return error. */
+                       if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
+                               return -EINVAL;
+                       }
+               } else {
+                       /* Invalid map flags. */
+                       return -EINVAL;
+               }
+
+               aspeed_p2a_enable_bridge(ctrl);
+               return 0;
+       case ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG:
+               /* This is a request for the memory-region and corresponding
+                * length that is used by the driver for mmap.
+                */
+
+               map.flags = 0;
+               map.addr = ctrl->mem_base;
+               map.length = ctrl->mem_size;
+
+               return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
+       }
+
+       return -EINVAL;
+}
+
+
+/*
+ * When a user opens this file, we create a structure to track their mappings.
+ *
+ * A user can map a region as read-only (bridge enabled), or read-write (bit
+ * flipped, and bridge enabled).  Either way, this tracking is used, s.t. when
+ * they release the device references are handled.
+ *
+ * The bridge is not enabled until a user calls an ioctl to map a region,
+ * simply opening the device does not enable it.
+ */
+static int aspeed_p2a_open(struct inode *inode, struct file *file)
+{
+       struct aspeed_p2a_user *priv;
+
+       priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->file = file;
+       priv->read = 0;
+       memset(priv->readwrite, 0, sizeof(priv->readwrite));
+
+       /* The file's private_data is initialized to the p2a_ctrl. */
+       priv->parent = file->private_data;
+
+       /* Set the file's private_data to the user's data. */
+       file->private_data = priv;
+
+       return 0;
+}
+
+/*
+ * This will close the users mappings.  It will go through what they had opened
+ * for readwrite, and decrement those counts.  If at the end, this is the last
+ * user, it'll close the bridge.
+ */
+static int aspeed_p2a_release(struct inode *inode, struct file *file)
+{
+       int i;
+       u32 value;
+       u32 bits = 0;
+       bool open_regions = false;
+       struct aspeed_p2a_user *priv = file->private_data;
+
+       /* Lock others from changing these values until everything is updated
+        * in one pass.
+        */
+       mutex_lock(&priv->parent->tracking);
+
+       priv->parent->readers -= priv->read;
+
+       for (i = 0; i < P2A_REGION_COUNT; i++) {
+               priv->parent->readerwriters[i] -= priv->readwrite[i];
+
+               if (priv->parent->readerwriters[i] > 0)
+                       open_regions = true;
+               else
+                       bits |= priv->parent->config->regions[i].bit;
+       }
+
+       /* Setting a bit to 1 disables the region, so let's just OR with the
+        * above to disable any.
+        */
+
+       /* Note, if another user is trying to ioctl, they can't grab tracking,
+        * and therefore can't grab either register mutex.
+        * If another user is trying to close, they can't grab tracking either.
+        */
+       regmap_update_bits(priv->parent->regmap, SCU2C, bits, bits);
+
+       /* If parent->readers is zero and open windows is 0, disable the
+        * bridge.
+        */
+       if (!open_regions && priv->parent->readers == 0)
+               aspeed_p2a_disable_bridge(priv->parent);
+
+       mutex_unlock(&priv->parent->tracking);
+
+       kfree(priv);
+
+       return 0;
+}
+
+static const struct file_operations aspeed_p2a_ctrl_fops = {
+       .owner = THIS_MODULE,
+       .mmap = aspeed_p2a_mmap,
+       .unlocked_ioctl = aspeed_p2a_ioctl,
+       .open = aspeed_p2a_open,
+       .release = aspeed_p2a_release,
+};
+
+/* The regions are controlled by SCU2C */
+static void aspeed_p2a_disable_all(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+       int i;
+       u32 value = 0;
+
+       for (i = 0; i < P2A_REGION_COUNT; i++)
+               value |= p2a_ctrl->config->regions[i].bit;
+
+       regmap_update_bits(p2a_ctrl->regmap, SCU2C, value, value);
+
+       /* Disable the bridge. */
+       aspeed_p2a_disable_bridge(p2a_ctrl);
+}
+
+static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
+{
+       struct aspeed_p2a_ctrl *misc_ctrl;
+       struct device *dev;
+       struct resource *res, resm;
+       struct device_node *node;
+       int rc = 0;
+
+       dev = &pdev->dev;
+
+       misc_ctrl = devm_kzalloc(dev, sizeof(*misc_ctrl), GFP_KERNEL);
+       if (!misc_ctrl)
+               return -ENOMEM;
+
+       mutex_init(&misc_ctrl->tracking);
+
+       /* optional. */
+       node = of_parse_phandle(dev->of_node, "memory-region", 0);
+       if (node) {
+               rc = of_address_to_resource(node, 0, &resm);
+               of_node_put(node);
+               if (rc) {
+                       dev_err(dev, "Couldn't address to resource for reserved memory\n");
+                       return -ENODEV;
+               }
+
+               misc_ctrl->mem_size = resource_size(&resm);
+               misc_ctrl->mem_base = resm.start;
+       }
+
+       misc_ctrl->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+       if (IS_ERR(misc_ctrl->regmap)) {
+               dev_err(dev, "Couldn't get regmap\n");
+               return -ENODEV;
+       }
+
+       misc_ctrl->config = of_device_get_match_data(dev);
+
+       dev_set_drvdata(&pdev->dev, misc_ctrl);
+
+       aspeed_p2a_disable_all(misc_ctrl);
+
+       misc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
+       misc_ctrl->miscdev.name = DEVICE_NAME;
+       misc_ctrl->miscdev.fops = &aspeed_p2a_ctrl_fops;
+       misc_ctrl->miscdev.parent = dev;
+
+       rc = misc_register(&misc_ctrl->miscdev);
+       if (rc)
+               dev_err(dev, "Unable to register device\n");
+
+       return rc;
+}
+
+static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
+{
+       struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
+
+       misc_deregister(&p2a_ctrl->miscdev);
+
+       return 0;
+}
+
+#define SCU2C_DRAM     BIT(25)
+#define SCU2C_SPI      BIT(24)
+#define SCU2C_SOC      BIT(23)
+#define SCU2C_FLASH    BIT(22)
+
+static const struct aspeed_p2a_model_data ast2400_model_data = {
+       .regions = {
+               {0x00000000, 0x17FFFFFF, SCU2C_FLASH},
+               {0x18000000, 0x1FFFFFFF, SCU2C_SOC},
+               {0x20000000, 0x2FFFFFFF, SCU2C_FLASH},
+               {0x30000000, 0x3FFFFFFF, SCU2C_SPI},
+               {0x40000000, 0x5FFFFFFF, SCU2C_DRAM},
+               {0x60000000, 0xFFFFFFFF, SCU2C_SOC},
+       }
+};
+
+static const struct aspeed_p2a_model_data ast2500_model_data = {
+       .regions = {
+               {0x00000000, 0x0FFFFFFF, SCU2C_FLASH},
+               {0x10000000, 0x1FFFFFFF, SCU2C_SOC},
+               {0x20000000, 0x3FFFFFFF, SCU2C_FLASH},
+               {0x40000000, 0x5FFFFFFF, SCU2C_SOC},
+               {0x60000000, 0x7FFFFFFF, SCU2C_SPI},
+               {0x80000000, 0xFFFFFFFF, SCU2C_DRAM},
+       }
+};
+
+static const struct of_device_id aspeed_p2a_ctrl_match[] = {
+       { .compatible = "aspeed,ast2400-p2a-ctrl",
+         .data = &ast2400_model_data },
+       { .compatible = "aspeed,ast2500-p2a-ctrl",
+         .data = &ast2500_model_data },
+       { },
+};
+
+static struct platform_driver aspeed_p2a_ctrl_driver = {
+       .driver = {
+               .name           = DEVICE_NAME,
+               .of_match_table = aspeed_p2a_ctrl_match,
+       },
+       .probe = aspeed_p2a_ctrl_probe,
+       .remove = aspeed_p2a_ctrl_remove,
+};
+
+module_platform_driver(aspeed_p2a_ctrl_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_p2a_ctrl_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick Venture <venture@google.com>");
+MODULE_DESCRIPTION("Control for aspeed 2400/2500 P2A VGA HOST to BMC mappings");
index da22bcb62b0482751c2208268328e6cdda4f7d02..4a2589bdc8ce6b94fce87eb1c55818b2bf2481fc 100644 (file)
@@ -455,13 +455,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
                pcr_dbg(pcr, "Set parameters for L1.2.");
                rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
                                        0xFF, PCIE_L1_2_EN);
-       rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+               rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
                                        RTS5260_DVCC_OCP_EN |
                                        RTS5260_DVCC_OCP_CL_EN,
                                        RTS5260_DVCC_OCP_EN |
                                        RTS5260_DVCC_OCP_CL_EN);
 
-       rtsx_pci_write_register(pcr, PWR_FE_CTL,
+               rtsx_pci_write_register(pcr, PWR_FE_CTL,
                                        0xFF, PCIE_L1_2_PD_FE_EN);
        } else if (lss_l1_1) {
                pcr_dbg(pcr, "Set parameters for L1.1.");
index 39f832d2728899a8575763834d53bb438ae262bd..98603e235cf04a1625eca271deb68e73b36efef3 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of.h>
+#include <linux/sort.h>
 #include <linux/of_platform.h>
 #include <linux/rpmsg.h>
 #include <linux/scatterlist.h>
@@ -31,7 +32,7 @@
 #define FASTRPC_CTX_MAX (256)
 #define FASTRPC_INIT_HANDLE    1
 #define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (2 * 1024 * 1024)
+#define INIT_FILELEN_MAX (64 * 1024 * 1024)
 #define INIT_MEMLEN_MAX  (8 * 1024 * 1024)
 #define FASTRPC_DEVICE_NAME    "fastrpc"
 
@@ -104,6 +105,15 @@ struct fastrpc_invoke_rsp {
        int retval;             /* invoke return value */
 };
 
+struct fastrpc_buf_overlap {
+       u64 start;
+       u64 end;
+       int raix;
+       u64 mstart;
+       u64 mend;
+       u64 offset;
+};
+
 struct fastrpc_buf {
        struct fastrpc_user *fl;
        struct dma_buf *dmabuf;
@@ -149,12 +159,14 @@ struct fastrpc_invoke_ctx {
        struct kref refcount;
        struct list_head node; /* list of ctxs */
        struct completion work;
+       struct work_struct put_work;
        struct fastrpc_msg msg;
        struct fastrpc_user *fl;
        struct fastrpc_remote_arg *rpra;
        struct fastrpc_map **maps;
        struct fastrpc_buf *buf;
        struct fastrpc_invoke_args *args;
+       struct fastrpc_buf_overlap *olaps;
        struct fastrpc_channel_ctx *cctx;
 };
 
@@ -282,6 +294,7 @@ static void fastrpc_context_free(struct kref *ref)
 {
        struct fastrpc_invoke_ctx *ctx;
        struct fastrpc_channel_ctx *cctx;
+       unsigned long flags;
        int i;
 
        ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
@@ -293,11 +306,12 @@ static void fastrpc_context_free(struct kref *ref)
        if (ctx->buf)
                fastrpc_buf_free(ctx->buf);
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        kfree(ctx->maps);
+       kfree(ctx->olaps);
        kfree(ctx);
 }
 
@@ -311,12 +325,70 @@ static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
        kref_put(&ctx->refcount, fastrpc_context_free);
 }
 
+static void fastrpc_context_put_wq(struct work_struct *work)
+{
+       struct fastrpc_invoke_ctx *ctx =
+                       container_of(work, struct fastrpc_invoke_ctx, put_work);
+
+       fastrpc_context_put(ctx);
+}
+
+#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
+static int olaps_cmp(const void *a, const void *b)
+{
+       struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
+       struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
+       /* sort with lowest starting buffer first */
+       int st = CMP(pa->start, pb->start);
+       /* sort with highest ending buffer first */
+       int ed = CMP(pb->end, pa->end);
+
+       return st == 0 ? ed : st;
+}
+
+static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
+{
+       u64 max_end = 0;
+       int i;
+
+       for (i = 0; i < ctx->nbufs; ++i) {
+               ctx->olaps[i].start = ctx->args[i].ptr;
+               ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
+               ctx->olaps[i].raix = i;
+       }
+
+       sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
+
+       for (i = 0; i < ctx->nbufs; ++i) {
+               /* Falling inside previous range */
+               if (ctx->olaps[i].start < max_end) {
+                       ctx->olaps[i].mstart = max_end;
+                       ctx->olaps[i].mend = ctx->olaps[i].end;
+                       ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
+
+                       if (ctx->olaps[i].end > max_end) {
+                               max_end = ctx->olaps[i].end;
+                       } else {
+                               ctx->olaps[i].mend = 0;
+                               ctx->olaps[i].mstart = 0;
+                       }
+
+               } else  {
+                       ctx->olaps[i].mend = ctx->olaps[i].end;
+                       ctx->olaps[i].mstart = ctx->olaps[i].start;
+                       ctx->olaps[i].offset = 0;
+                       max_end = ctx->olaps[i].end;
+               }
+       }
+}
+
 static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
                        struct fastrpc_user *user, u32 kernel, u32 sc,
                        struct fastrpc_invoke_args *args)
 {
        struct fastrpc_channel_ctx *cctx = user->cctx;
        struct fastrpc_invoke_ctx *ctx = NULL;
+       unsigned long flags;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -336,7 +408,15 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
+               ctx->olaps = kcalloc(ctx->nscalars,
+                                   sizeof(*ctx->olaps), GFP_KERNEL);
+               if (!ctx->olaps) {
+                       kfree(ctx->maps);
+                       kfree(ctx);
+                       return ERR_PTR(-ENOMEM);
+               }
                ctx->args = args;
+               fastrpc_get_buff_overlaps(ctx);
        }
 
        ctx->sc = sc;
@@ -345,20 +425,21 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
        ctx->tgid = user->tgid;
        ctx->cctx = cctx;
        init_completion(&ctx->work);
+       INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
 
        spin_lock(&user->lock);
        list_add_tail(&ctx->node, &user->pending);
        spin_unlock(&user->lock);
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
                               FASTRPC_CTX_MAX, GFP_ATOMIC);
        if (ret < 0) {
-               spin_unlock(&cctx->lock);
+               spin_unlock_irqrestore(&cctx->lock, flags);
                goto err_idr;
        }
        ctx->ctxid = ret << 4;
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        kref_init(&ctx->refcount);
 
@@ -368,6 +449,7 @@ err_idr:
        list_del(&ctx->node);
        spin_unlock(&user->lock);
        kfree(ctx->maps);
+       kfree(ctx->olaps);
        kfree(ctx);
 
        return ERR_PTR(ret);
@@ -586,8 +668,11 @@ static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
        size = ALIGN(metalen, FASTRPC_ALIGN);
        for (i = 0; i < ctx->nscalars; i++) {
                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
-                       size = ALIGN(size, FASTRPC_ALIGN);
-                       size += ctx->args[i].length;
+
+                       if (ctx->olaps[i].offset == 0)
+                               size = ALIGN(size, FASTRPC_ALIGN);
+
+                       size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
                }
        }
 
@@ -625,12 +710,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
        struct fastrpc_remote_arg *rpra;
        struct fastrpc_invoke_buf *list;
        struct fastrpc_phy_page *pages;
-       int inbufs, i, err = 0;
-       u64 rlen, pkt_size;
+       int inbufs, i, oix, err = 0;
+       u64 len, rlen, pkt_size;
+       u64 pg_start, pg_end;
        uintptr_t args;
        int metalen;
 
-
        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
        metalen = fastrpc_get_meta_size(ctx);
        pkt_size = fastrpc_get_payload_size(ctx, metalen);
@@ -653,8 +738,11 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
        rlen = pkt_size - metalen;
        ctx->rpra = rpra;
 
-       for (i = 0; i < ctx->nbufs; ++i) {
-               u64 len = ctx->args[i].length;
+       for (oix = 0; oix < ctx->nbufs; ++oix) {
+               int mlen;
+
+               i = ctx->olaps[oix].raix;
+               len = ctx->args[i].length;
 
                rpra[i].pv = 0;
                rpra[i].len = len;
@@ -664,22 +752,45 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
                if (!len)
                        continue;
 
-               pages[i].size = roundup(len, PAGE_SIZE);
-
                if (ctx->maps[i]) {
+                       struct vm_area_struct *vma = NULL;
+
                        rpra[i].pv = (u64) ctx->args[i].ptr;
                        pages[i].addr = ctx->maps[i]->phys;
+
+                       vma = find_vma(current->mm, ctx->args[i].ptr);
+                       if (vma)
+                               pages[i].addr += ctx->args[i].ptr -
+                                                vma->vm_start;
+
+                       pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
+                       pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
+                                 PAGE_SHIFT;
+                       pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+
                } else {
-                       rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
-                       args = ALIGN(args, FASTRPC_ALIGN);
-                       if (rlen < len)
+
+                       if (ctx->olaps[oix].offset == 0) {
+                               rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
+                               args = ALIGN(args, FASTRPC_ALIGN);
+                       }
+
+                       mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
+
+                       if (rlen < mlen)
                                goto bail;
 
-                       rpra[i].pv = args;
-                       pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
+                       rpra[i].pv = args - ctx->olaps[oix].offset;
+                       pages[i].addr = ctx->buf->phys -
+                                       ctx->olaps[oix].offset +
+                                       (pkt_size - rlen);
                        pages[i].addr = pages[i].addr & PAGE_MASK;
-                       args = args + len;
-                       rlen -= len;
+
+                       pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
+                       pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+                       pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+                       args = args + mlen;
+                       rlen -= mlen;
                }
 
                if (i < inbufs && !ctx->maps[i]) {
@@ -782,6 +893,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
                if (err)
                        goto bail;
        }
+
+       /* make sure that all CPU memory writes are seen by DSP */
+       dma_wmb();
        /* Send invoke buffer to remote dsp */
        err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
        if (err)
@@ -798,6 +912,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
                goto bail;
 
        if (ctx->nscalars) {
+               /* make sure that all memory writes by DSP are seen by CPU */
+               dma_rmb();
                /* populate all the output buffers with results */
                err = fastrpc_put_args(ctx, kernel);
                if (err)
@@ -843,12 +959,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
 
        if (copy_from_user(&init, argp, sizeof(init))) {
                err = -EFAULT;
-               goto bail;
+               goto err;
        }
 
        if (init.filelen > INIT_FILELEN_MAX) {
                err = -EINVAL;
-               goto bail;
+               goto err;
        }
 
        inbuf.pgid = fl->tgid;
@@ -862,17 +978,15 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
        if (init.filelen && init.filefd) {
                err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
                if (err)
-                       goto bail;
+                       goto err;
        }
 
        memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
                       1024 * 1024);
        err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
                                &imem);
-       if (err) {
-               fastrpc_map_put(map);
-               goto bail;
-       }
+       if (err)
+               goto err_alloc;
 
        fl->init_mem = imem;
        args[0].ptr = (u64)(uintptr_t)&inbuf;
@@ -908,13 +1022,24 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
 
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                      sc, args);
+       if (err)
+               goto err_invoke;
 
-       if (err) {
+       kfree(args);
+
+       return 0;
+
+err_invoke:
+       fl->init_mem = NULL;
+       fastrpc_buf_free(imem);
+err_alloc:
+       if (map) {
+               spin_lock(&fl->lock);
+               list_del(&map->node);
+               spin_unlock(&fl->lock);
                fastrpc_map_put(map);
-               fastrpc_buf_free(imem);
        }
-
-bail:
+err:
        kfree(args);
 
        return err;
@@ -924,9 +1049,10 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
                                        struct fastrpc_channel_ctx *cctx)
 {
        struct fastrpc_session_ctx *session = NULL;
+       unsigned long flags;
        int i;
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        for (i = 0; i < cctx->sesscount; i++) {
                if (!cctx->session[i].used && cctx->session[i].valid) {
                        cctx->session[i].used = true;
@@ -934,7 +1060,7 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
                        break;
                }
        }
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        return session;
 }
@@ -942,9 +1068,11 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
 static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
                                 struct fastrpc_session_ctx *session)
 {
-       spin_lock(&cctx->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cctx->lock, flags);
        session->used = false;
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 }
 
 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
@@ -970,12 +1098,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
        struct fastrpc_channel_ctx *cctx = fl->cctx;
        struct fastrpc_invoke_ctx *ctx, *n;
        struct fastrpc_map *map, *m;
+       unsigned long flags;
 
        fastrpc_release_current_dsp_process(fl);
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        list_del(&fl->user);
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        if (fl->init_mem)
                fastrpc_buf_free(fl->init_mem);
@@ -1003,6 +1132,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 {
        struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
        struct fastrpc_user *fl = NULL;
+       unsigned long flags;
 
        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
@@ -1026,9 +1156,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
                return -EBUSY;
        }
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        list_add_tail(&fl->user, &cctx->users);
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        return 0;
 }
@@ -1184,6 +1314,8 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        int i, sessions = 0;
+       unsigned long flags;
+       int rc;
 
        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
@@ -1191,7 +1323,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
 
        of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        sess = &cctx->session[cctx->sesscount];
        sess->used = false;
        sess->valid = true;
@@ -1212,8 +1344,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
                }
        }
        cctx->sesscount++;
-       spin_unlock(&cctx->lock);
-       dma_set_mask(dev, DMA_BIT_MASK(32));
+       spin_unlock_irqrestore(&cctx->lock, flags);
+       rc = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (rc) {
+               dev_err(dev, "32-bit DMA enable failed\n");
+               return rc;
+       }
 
        return 0;
 }
@@ -1222,16 +1358,17 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
 {
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
        struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
+       unsigned long flags;
        int i;
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
                if (cctx->session[i].sid == sess->sid) {
                        cctx->session[i].valid = false;
                        cctx->sesscount--;
                }
        }
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        return 0;
 }
@@ -1313,11 +1450,12 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 {
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_user *user;
+       unsigned long flags;
 
-       spin_lock(&cctx->lock);
+       spin_lock_irqsave(&cctx->lock, flags);
        list_for_each_entry(user, &cctx->users, user)
                fastrpc_notify_users(user);
-       spin_unlock(&cctx->lock);
+       spin_unlock_irqrestore(&cctx->lock, flags);
 
        misc_deregister(&cctx->miscdev);
        of_platform_depopulate(&rpdev->dev);
@@ -1349,7 +1487,13 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 
        ctx->retval = rsp->retval;
        complete(&ctx->work);
-       fastrpc_context_put(ctx);
+
+       /*
+        * The DMA buffer associated with the context cannot be freed in
+        * interrupt context so schedule it through a worker thread to
+        * avoid a kernel BUG.
+        */
+       schedule_work(&ctx->put_work);
 
        return 0;
 }
index c6592db59b251b3020d05c7c4f10f5839de16e2b..f8e85243d672382d714b5d08cd947b34ecf6f702 100644 (file)
@@ -6,7 +6,7 @@ obj-m   := habanalabs.o
 
 habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
                command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
-               command_submission.o mmu.o
+               command_submission.o mmu.o firmware_if.o pci.o
 
 habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
 
index 85f75806a9a7ddb1ab2a26ccec4d8a3b7f04678a..b1ffca47d7484b81377e4e519149b6c1ca019599 100644 (file)
@@ -214,6 +214,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
        u64 handle;
        int rc;
 
+       if (hl_device_disabled_or_in_reset(hdev)) {
+               dev_warn_ratelimited(hdev->dev,
+                       "Device is %s. Can't execute CB IOCTL\n",
+                       atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+               return -EBUSY;
+       }
+
        switch (args->in.op) {
        case HL_CB_OP_CREATE:
                rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
index 19c84214a7ea8890543ea8341033ed1ceb89df12..02c48da0b64523f1181f2a87d7c24b721ce3bb0b 100644 (file)
@@ -261,7 +261,8 @@ static void cs_timedout(struct work_struct *work)
        ctx_asid = cs->ctx->asid;
 
        /* TODO: add information about last signaled seq and last emitted seq */
-       dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+       dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
+               ctx_asid, cs->sequence);
 
        cs_put(cs);
 
@@ -604,7 +605,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
        bool need_soft_reset = false;
 
        if (hl_device_disabled_or_in_reset(hdev)) {
-               dev_warn(hdev->dev,
+               dev_warn_ratelimited(hdev->dev,
                        "Device is %s. Can't submit new CS\n",
                        atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
                rc = -EBUSY;
index 974a87789bd8689d1530daa8890bac3b3b32d38c..a4447699ff4e2a242592396fb908fdbb75197e87 100644 (file)
@@ -505,22 +505,97 @@ err:
        return -EINVAL;
 }
 
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
+                               u64 *phys_addr)
+{
+       struct hl_ctx *ctx = hdev->user_ctx;
+       u64 hop_addr, hop_pte_addr, hop_pte;
+       int rc = 0;
+
+       if (!ctx) {
+               dev_err(hdev->dev, "no ctx available\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&ctx->mmu_lock);
+
+       /* hop 0 */
+       hop_addr = get_hop0_addr(ctx);
+       hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+       hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+       /* hop 1 */
+       hop_addr = get_next_hop_addr(hop_pte);
+       if (hop_addr == ULLONG_MAX)
+               goto not_mapped;
+       hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+       hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+       /* hop 2 */
+       hop_addr = get_next_hop_addr(hop_pte);
+       if (hop_addr == ULLONG_MAX)
+               goto not_mapped;
+       hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+       hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+       /* hop 3 */
+       hop_addr = get_next_hop_addr(hop_pte);
+       if (hop_addr == ULLONG_MAX)
+               goto not_mapped;
+       hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+       hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+       if (!(hop_pte & LAST_MASK)) {
+               /* hop 4 */
+               hop_addr = get_next_hop_addr(hop_pte);
+               if (hop_addr == ULLONG_MAX)
+                       goto not_mapped;
+               hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+               hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+       }
+
+       if (!(hop_pte & PAGE_PRESENT_MASK))
+               goto not_mapped;
+
+       *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
+
+       goto out;
+
+not_mapped:
+       dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+                       virt_addr);
+       rc = -EINVAL;
+out:
+       mutex_unlock(&ctx->mmu_lock);
+       return rc;
+}
+
 static ssize_t hl_data_read32(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
        char tmp_buf[32];
+       u64 addr = entry->addr;
        u32 val;
        ssize_t rc;
 
        if (*ppos)
                return 0;
 
-       rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
+       if (addr >= prop->va_space_dram_start_address &&
+                       addr < prop->va_space_dram_end_address &&
+                       hdev->mmu_enable &&
+                       hdev->dram_supports_virtual_memory) {
+               rc = device_va_to_pa(hdev, entry->addr, &addr);
+               if (rc)
+                       return rc;
+       }
+
+       rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
        if (rc) {
-               dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
-                       entry->addr);
+               dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
                return rc;
        }
 
@@ -536,6 +611,8 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 addr = entry->addr;
        u32 value;
        ssize_t rc;
 
@@ -543,10 +620,19 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
        if (rc)
                return rc;
 
-       rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
+       if (addr >= prop->va_space_dram_start_address &&
+                       addr < prop->va_space_dram_end_address &&
+                       hdev->mmu_enable &&
+                       hdev->dram_supports_virtual_memory) {
+               rc = device_va_to_pa(hdev, entry->addr, &addr);
+               if (rc)
+                       return rc;
+       }
+
+       rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
        if (rc) {
                dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
-                       value, entry->addr);
+                       value, addr);
                return rc;
        }
 
index 77d51be66c7e84045558fff78eea0a8e9a70439e..25bfb093ff26fe47dccb277725fc2a5af13e9b70 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
+#include <uapi/misc/habanalabs.h>
 
 #define HL_PLDM_PENDING_RESET_PER_SEC  (HL_PENDING_RESET_PER_SEC * 10)
 
@@ -21,6 +22,20 @@ bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
                return false;
 }
 
+enum hl_device_status hl_device_status(struct hl_device *hdev)
+{
+       enum hl_device_status status;
+
+       if (hdev->disabled)
+               status = HL_DEVICE_STATUS_MALFUNCTION;
+       else if (atomic_read(&hdev->in_reset))
+               status = HL_DEVICE_STATUS_IN_RESET;
+       else
+               status = HL_DEVICE_STATUS_OPERATIONAL;
+
+       return status;
+};
+
 static void hpriv_release(struct kref *ref)
 {
        struct hl_fpriv *hpriv;
@@ -498,11 +513,8 @@ disable_device:
        return rc;
 }
 
-static void hl_device_hard_reset_pending(struct work_struct *work)
+static void device_kill_open_processes(struct hl_device *hdev)
 {
-       struct hl_device_reset_work *device_reset_work =
-               container_of(work, struct hl_device_reset_work, reset_work);
-       struct hl_device *hdev = device_reset_work->hdev;
        u16 pending_total, pending_cnt;
        struct task_struct *task = NULL;
 
@@ -537,6 +549,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
                }
        }
 
+       /* We killed the open users, but because the driver cleans up after the
+        * user contexts are closed (e.g. mmu mappings), we need to wait again
+        * to make sure the cleaning phase is finished before continuing with
+        * the reset
+        */
+
        pending_cnt = pending_total;
 
        while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
@@ -552,6 +570,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 
        mutex_unlock(&hdev->fd_open_cnt_lock);
 
+}
+
+static void device_hard_reset_pending(struct work_struct *work)
+{
+       struct hl_device_reset_work *device_reset_work =
+               container_of(work, struct hl_device_reset_work, reset_work);
+       struct hl_device *hdev = device_reset_work->hdev;
+
+       device_kill_open_processes(hdev);
+
        hl_device_reset(hdev, true, true);
 
        kfree(device_reset_work);
@@ -613,6 +641,8 @@ again:
        if ((hard_reset) && (!from_hard_reset_thread)) {
                struct hl_device_reset_work *device_reset_work;
 
+               hdev->hard_reset_pending = true;
+
                if (!hdev->pdev) {
                        dev_err(hdev->dev,
                                "Reset action is NOT supported in simulator\n");
@@ -620,8 +650,6 @@ again:
                        goto out_err;
                }
 
-               hdev->hard_reset_pending = true;
-
                device_reset_work = kzalloc(sizeof(*device_reset_work),
                                                GFP_ATOMIC);
                if (!device_reset_work) {
@@ -635,7 +663,7 @@ again:
                 * from a dedicated work
                 */
                INIT_WORK(&device_reset_work->reset_work,
-                               hl_device_hard_reset_pending);
+                               device_hard_reset_pending);
                device_reset_work->hdev = hdev;
                schedule_work(&device_reset_work->reset_work);
 
@@ -663,17 +691,9 @@ again:
        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev);
 
-       if (hard_reset) {
-               /* Release kernel context */
-               if (hl_ctx_put(hdev->kernel_ctx) != 1) {
-                       dev_err(hdev->dev,
-                               "kernel ctx is alive during hard reset\n");
-                       rc = -EBUSY;
-                       goto out_err;
-               }
-
+       /* Release kernel context */
+       if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
                hdev->kernel_ctx = NULL;
-       }
 
        /* Reset the H/W. It will be in idle state after this returns */
        hdev->asic_funcs->hw_fini(hdev, hard_reset);
@@ -698,6 +718,14 @@ again:
 
        if (hard_reset) {
                hdev->device_cpu_disabled = false;
+               hdev->hard_reset_pending = false;
+
+               if (hdev->kernel_ctx) {
+                       dev_crit(hdev->dev,
+                               "kernel ctx was alive during hard reset, something is terribly wrong\n");
+                       rc = -EBUSY;
+                       goto out_err;
+               }
 
                /* Allocate the kernel context */
                hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
@@ -752,8 +780,6 @@ again:
                }
 
                hl_set_max_power(hdev, hdev->max_power);
-
-               hdev->hard_reset_pending = false;
        } else {
                rc = hdev->asic_funcs->soft_reset_late_init(hdev);
                if (rc) {
@@ -1030,11 +1056,22 @@ void hl_device_fini(struct hl_device *hdev)
                        WARN(1, "Failed to remove device because reset function did not finish\n");
                        return;
                }
-       };
+       }
 
        /* Mark device as disabled */
        hdev->disabled = true;
 
+       /*
+        * Flush anyone that is inside the critical section of enqueue
+        * jobs to the H/W
+        */
+       hdev->asic_funcs->hw_queues_lock(hdev);
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       hdev->hard_reset_pending = true;
+
+       device_kill_open_processes(hdev);
+
        hl_hwmon_fini(hdev);
 
        device_late_fini(hdev);
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
new file mode 100644 (file)
index 0000000..1acf826
--- /dev/null
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/firmware.h>
+#include <linux/genalloc.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/**
+ * hl_fw_push_fw_to_device() - Push FW code to device.
+ * @hdev: pointer to hl_device structure.
+ *
+ * Copy fw code from firmware file to device memory.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+                               void __iomem *dst)
+{
+       const struct firmware *fw;
+       const u64 *fw_data;
+       size_t fw_size, i;
+       int rc;
+
+       rc = request_firmware(&fw, fw_name, hdev->dev);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to request %s\n", fw_name);
+               goto out;
+       }
+
+       fw_size = fw->size;
+       if ((fw_size % 4) != 0) {
+               dev_err(hdev->dev, "illegal %s firmware size %zu\n",
+                       fw_name, fw_size);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+
+       fw_data = (const u64 *) fw->data;
+
+       if ((fw->size % 8) != 0)
+               fw_size -= 8;
+
+       for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
+               if (!(i & (0x80000 - 1))) {
+                       dev_dbg(hdev->dev,
+                               "copied so far %zu out of %zu for %s firmware",
+                               i, fw_size, fw_name);
+                       usleep_range(20, 100);
+               }
+
+               writeq(*fw_data, dst);
+       }
+
+       if ((fw->size % 8) != 0)
+               writel(*(const u32 *) fw_data, dst);
+
+out:
+       release_firmware(fw);
+       return rc;
+}
+
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+{
+       struct armcp_packet pkt = {};
+
+       pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+       return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
+                               sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+}
+
+int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
+                               u16 len, u32 timeout, long *result)
+{
+       struct armcp_packet *pkt;
+       dma_addr_t pkt_dma_addr;
+       u32 tmp;
+       int rc = 0;
+
+       if (len > HL_CPU_CB_SIZE) {
+               dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
+                       len);
+               return -ENOMEM;
+       }
+
+       pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
+                                                               &pkt_dma_addr);
+       if (!pkt) {
+               dev_err(hdev->dev,
+                       "Failed to allocate DMA memory for packet to CPU\n");
+               return -ENOMEM;
+       }
+
+       memcpy(pkt, msg, len);
+
+       mutex_lock(&hdev->send_cpu_message_lock);
+
+       if (hdev->disabled)
+               goto out;
+
+       if (hdev->device_cpu_disabled) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
+               goto out;
+       }
+
+       rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
+                                       timeout, &tmp);
+
+       hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+       if (rc == -ETIMEDOUT) {
+               dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
+               hdev->device_cpu_disabled = true;
+               goto out;
+       }
+
+       if (tmp == ARMCP_PACKET_FENCE_VAL) {
+               u32 ctl = le32_to_cpu(pkt->ctl);
+
+               rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+               if (rc) {
+                       dev_err(hdev->dev,
+                               "F/W ERROR %d for CPU packet %d\n",
+                               rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
+                                               >> ARMCP_PKT_CTL_OPCODE_SHIFT);
+                       rc = -EINVAL;
+               } else if (result) {
+                       *result = (long) le64_to_cpu(pkt->result);
+               }
+       } else {
+               dev_err(hdev->dev, "CPU packet wrong fence value\n");
+               rc = -EINVAL;
+       }
+
+out:
+       mutex_unlock(&hdev->send_cpu_message_lock);
+
+       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+
+       return rc;
+}
+
+int hl_fw_test_cpu_queue(struct hl_device *hdev)
+{
+       struct armcp_packet test_pkt = {};
+       long result;
+       int rc;
+
+       test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+                                       ARMCP_PKT_CTL_OPCODE_SHIFT);
+       test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
+                       sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+       if (!rc) {
+               if (result == ARMCP_PACKET_FENCE_VAL)
+                       dev_info(hdev->dev,
+                               "queue test on CPU queue succeeded\n");
+               else
+                       dev_err(hdev->dev,
+                               "CPU queue test failed (0x%08lX)\n", result);
+       } else {
+               dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
+       }
+
+       return rc;
+}
+
+void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+                                               dma_addr_t *dma_handle)
+{
+       u64 kernel_addr;
+
+       /* roundup to HL_CPU_PKT_SIZE */
+       size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
+
+       kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
+
+       *dma_handle = hdev->cpu_accessible_dma_address +
+               (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
+
+       return (void *) (uintptr_t) kernel_addr;
+}
+
+void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+                                       void *vaddr)
+{
+       /* roundup to HL_CPU_PKT_SIZE */
+       size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
+
+       gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
+                       size);
+}
+
+int hl_fw_send_heartbeat(struct hl_device *hdev)
+{
+       struct armcp_packet hb_pkt = {};
+       long result;
+       int rc;
+
+       hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+                                       ARMCP_PKT_CTL_OPCODE_SHIFT);
+       hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
+                       sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+       if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
+               rc = -EIO;
+
+       return rc;
+}
+
+int hl_fw_armcp_info_get(struct hl_device *hdev)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct armcp_packet pkt = {};
+       void *armcp_info_cpu_addr;
+       dma_addr_t armcp_info_dma_addr;
+       long result;
+       int rc;
+
+       armcp_info_cpu_addr =
+                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+                                       sizeof(struct armcp_info),
+                                       &armcp_info_dma_addr);
+       if (!armcp_info_cpu_addr) {
+               dev_err(hdev->dev,
+                       "Failed to allocate DMA memory for ArmCP info packet\n");
+               return -ENOMEM;
+       }
+
+       memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
+
+       pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
+                               ARMCP_PKT_CTL_OPCODE_SHIFT);
+       pkt.addr = cpu_to_le64(armcp_info_dma_addr +
+                               prop->host_phys_base_address);
+       pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+                                       HL_ARMCP_INFO_TIMEOUT_USEC, &result);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to send armcp info pkt, error %d\n", rc);
+               goto out;
+       }
+
+       memcpy(&prop->armcp_info, armcp_info_cpu_addr,
+                       sizeof(prop->armcp_info));
+
+       rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to build hwmon channel info, error %d\n", rc);
+               rc = -EFAULT;
+               goto out;
+       }
+
+out:
+       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+                       sizeof(struct armcp_info), armcp_info_cpu_addr);
+
+       return rc;
+}
+
+int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct armcp_packet pkt = {};
+       void *eeprom_info_cpu_addr;
+       dma_addr_t eeprom_info_dma_addr;
+       long result;
+       int rc;
+
+       eeprom_info_cpu_addr =
+                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+                                       max_size, &eeprom_info_dma_addr);
+       if (!eeprom_info_cpu_addr) {
+               dev_err(hdev->dev,
+                       "Failed to allocate DMA memory for EEPROM info packet\n");
+               return -ENOMEM;
+       }
+
+       memset(eeprom_info_cpu_addr, 0, max_size);
+
+       pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
+                               ARMCP_PKT_CTL_OPCODE_SHIFT);
+       pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
+                               prop->host_phys_base_address);
+       pkt.data_max_size = cpu_to_le32(max_size);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+                       HL_ARMCP_EEPROM_TIMEOUT_USEC, &result);
+
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to send armcp EEPROM pkt, error %d\n", rc);
+               goto out;
+       }
+
+       /* result contains the actual size */
+       memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
+
+out:
+       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
+                       eeprom_info_cpu_addr);
+
+       return rc;
+}
index e458e5ba500bf01ab70dfdfd429d4f8a276c219f..131432f677e2de95810f56409a06b9e428d3163c 100644 (file)
@@ -1,3 +1,4 @@
 subdir-ccflags-y += -I$(src)
 
-HL_GOYA_FILES :=  goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
+HL_GOYA_FILES :=  goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
+       goya/goya_coresight.o
index ea979ebd62fb8c5f30d08b052a0e481325470ece..bde11fc2c2512e6c05064d6b160c449c995b0f61 100644 (file)
 
 #include <linux/pci.h>
 #include <linux/genalloc.h>
-#include <linux/firmware.h>
 #include <linux/hwmon.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
 
 /*
  * GOYA security scheme:
@@ -71,7 +69,7 @@
  *
  */
 
-#define GOYA_MMU_REGS_NUM              61
+#define GOYA_MMU_REGS_NUM              63
 
 #define GOYA_DMA_POOL_BLK_SIZE         0x100           /* 256 bytes */
 
 #define GOYA_RESET_WAIT_MSEC           1               /* 1ms */
 #define GOYA_CPU_RESET_WAIT_MSEC       100             /* 100ms */
 #define GOYA_PLDM_RESET_WAIT_MSEC      1000            /* 1s */
-#define GOYA_CPU_TIMEOUT_USEC          10000000        /* 10s */
 #define GOYA_TEST_QUEUE_WAIT_USEC      100000          /* 100ms */
 #define GOYA_PLDM_MMU_TIMEOUT_USEC     (MMU_CONFIG_TIMEOUT_USEC * 100)
 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC   (HL_DEVICE_TIMEOUT_USEC * 30)
 
 #define GOYA_QMAN0_FENCE_VAL           0xD169B243
 
-#define GOYA_MAX_INITIATORS            20
-
 #define GOYA_MAX_STRING_LEN            20
 
 #define GOYA_CB_POOL_CB_CNT            512
@@ -173,12 +168,12 @@ static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
        mmMME_SBA_CONTROL_DATA,
        mmMME_SBB_CONTROL_DATA,
        mmMME_SBC_CONTROL_DATA,
-       mmMME_WBC_CONTROL_DATA
+       mmMME_WBC_CONTROL_DATA,
+       mmPCIE_WRAP_PSOC_ARUSER,
+       mmPCIE_WRAP_PSOC_AWUSER
 };
 
-#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121
-
-static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
+static u32 goya_all_events[] = {
        GOYA_ASYNC_EVENT_ID_PCIE_IF,
        GOYA_ASYNC_EVENT_ID_TPC0_ECC,
        GOYA_ASYNC_EVENT_ID_TPC1_ECC,
@@ -302,13 +297,6 @@ static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
        GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
 };
 
-static int goya_armcp_info_get(struct hl_device *hdev);
-static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
-static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
-static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
-                                       u64 phys_addr);
-
 static void goya_get_fixed_properties(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -367,24 +355,13 @@ static void goya_get_fixed_properties(struct hl_device *hdev)
        prop->cfg_size = CFG_SIZE;
        prop->max_asid = MAX_ASID;
        prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
+       prop->high_pll = PLL_HIGH_DEFAULT;
        prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
        prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
        prop->max_power_default = MAX_POWER_DEFAULT;
        prop->tpc_enabled_mask = TPC_ENABLED_MASK;
-
-       prop->high_pll = PLL_HIGH_DEFAULT;
-}
-
-int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
-{
-       struct armcp_packet pkt;
-
-       memset(&pkt, 0, sizeof(pkt));
-
-       pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
-
-       return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
-                       sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+       prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
+       prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
 }
 
 /*
@@ -398,159 +375,18 @@ int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
  */
 static int goya_pci_bars_map(struct hl_device *hdev)
 {
-       struct pci_dev *pdev = hdev->pdev;
+       static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
+       bool is_wc[3] = {false, false, true};
        int rc;
 
-       rc = pci_request_regions(pdev, HL_NAME);
-       if (rc) {
-               dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+       rc = hl_pci_bars_map(hdev, name, is_wc);
+       if (rc)
                return rc;
-       }
-
-       hdev->pcie_bar[SRAM_CFG_BAR_ID] =
-                       pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
-       if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
-               dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
-               rc = -ENODEV;
-               goto err_release_regions;
-       }
-
-       hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
-       if (!hdev->pcie_bar[MSIX_BAR_ID]) {
-               dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
-               rc = -ENODEV;
-               goto err_unmap_sram_cfg;
-       }
-
-       hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
-       if (!hdev->pcie_bar[DDR_BAR_ID]) {
-               dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
-               rc = -ENODEV;
-               goto err_unmap_msix;
-       }
 
        hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
-                               (CFG_BASE - SRAM_BASE_ADDR);
+                       (CFG_BASE - SRAM_BASE_ADDR);
 
        return 0;
-
-err_unmap_msix:
-       iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
-err_unmap_sram_cfg:
-       iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
-err_release_regions:
-       pci_release_regions(pdev);
-
-       return rc;
-}
-
-/*
- * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
- *
- * @hdev: pointer to hl_device structure
- *
- * Release all PCI BARS and unmap their virtual addresses
- *
- */
-static void goya_pci_bars_unmap(struct hl_device *hdev)
-{
-       struct pci_dev *pdev = hdev->pdev;
-
-       iounmap(hdev->pcie_bar[DDR_BAR_ID]);
-       iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
-       iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
-       pci_release_regions(pdev);
-}
-
-/*
- * goya_elbi_write - Write through the ELBI interface
- *
- * @hdev: pointer to hl_device structure
- *
- * return 0 on success, -1 on failure
- *
- */
-static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
-{
-       struct pci_dev *pdev = hdev->pdev;
-       ktime_t timeout;
-       u32 val;
-
-       /* Clear previous status */
-       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
-
-       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
-       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
-       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
-                               PCI_CONFIG_ELBI_CTRL_WRITE);
-
-       timeout = ktime_add_ms(ktime_get(), 10);
-       for (;;) {
-               pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
-               if (val & PCI_CONFIG_ELBI_STS_MASK)
-                       break;
-               if (ktime_compare(ktime_get(), timeout) > 0) {
-                       pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
-                                               &val);
-                       break;
-               }
-               usleep_range(300, 500);
-       }
-
-       if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
-               return 0;
-
-       if (val & PCI_CONFIG_ELBI_STS_ERR) {
-               dev_err(hdev->dev, "Error writing to ELBI\n");
-               return -EIO;
-       }
-
-       if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
-               dev_err(hdev->dev, "ELBI write didn't finish in time\n");
-               return -EIO;
-       }
-
-       dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
-       return -EIO;
-}
-
-/*
- * goya_iatu_write - iatu write routine
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
-{
-       u32 dbi_offset;
-       int rc;
-
-       dbi_offset = addr & 0xFFF;
-
-       rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
-       rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
-
-       if (rc)
-               return -EIO;
-
-       return 0;
-}
-
-static void goya_reset_link_through_bridge(struct hl_device *hdev)
-{
-       struct pci_dev *pdev = hdev->pdev;
-       struct pci_dev *parent_port;
-       u16 val;
-
-       parent_port = pdev->bus->self;
-       pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
-       val |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
-       ssleep(1);
-
-       val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
-       pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
-       ssleep(3);
 }
 
 /*
@@ -572,20 +408,9 @@ static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
                return 0;
 
        /* Inbound Region 1 - Bar 4 - Point to DDR */
-       rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
-       rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
-       rc |= goya_iatu_write(hdev, 0x300, 0);
-       /* Enable + Bar match + match enable + Bar 4 */
-       rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
-
-       /* Return the DBI window to the default location */
-       rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
-       rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
-               return -EIO;
-       }
+       rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
+       if (rc)
+               return rc;
 
        if (goya)
                goya->ddr_bar_cur_addr = addr;
@@ -603,40 +428,8 @@ static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
  */
 static int goya_init_iatu(struct hl_device *hdev)
 {
-       int rc;
-
-       /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
-       rc  = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
-       rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
-       rc |= goya_iatu_write(hdev, 0x100, 0);
-       /* Enable + Bar match + match enable */
-       rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
-
-       /* Inbound Region 1 - Bar 4 - Point to DDR */
-       rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
-
-       /* Outbound Region 0 - Point to Host */
-       rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
-       rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
-       rc |= goya_iatu_write(hdev, 0x010,
-               lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
-       rc |= goya_iatu_write(hdev, 0x014, 0);
-       rc |= goya_iatu_write(hdev, 0x018, 0);
-       rc |= goya_iatu_write(hdev, 0x020,
-               upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
-       /* Increase region size */
-       rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
-       /* Enable */
-       rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
-
-       /* Return the DBI window to the default location */
-       rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
-       rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
-
-       if (rc)
-               return -EIO;
-
-       return 0;
+       return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
+                               HOST_PHYS_SIZE);
 }
 
 /*
@@ -682,52 +475,9 @@ static int goya_early_init(struct hl_device *hdev)
 
        prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
 
-       /* set DMA mask for GOYA */
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
-       if (rc) {
-               dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_err(hdev->dev,
-                               "Unable to set pci dma mask to 32 bits\n");
-                       return rc;
-               }
-       }
-
-       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
-       if (rc) {
-               dev_warn(hdev->dev,
-                       "Unable to set pci consistent dma mask to 39 bits\n");
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_err(hdev->dev,
-                               "Unable to set pci consistent dma mask to 32 bits\n");
-                       return rc;
-               }
-       }
-
-       if (hdev->reset_pcilink)
-               goya_reset_link_through_bridge(hdev);
-
-       rc = pci_enable_device_mem(pdev);
-       if (rc) {
-               dev_err(hdev->dev, "can't enable PCI device\n");
+       rc = hl_pci_init(hdev, 39);
+       if (rc)
                return rc;
-       }
-
-       pci_set_master(pdev);
-
-       rc = goya_init_iatu(hdev);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to initialize iATU\n");
-               goto disable_device;
-       }
-
-       rc = goya_pci_bars_map(hdev);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
-               goto disable_device;
-       }
 
        if (!hdev->pldm) {
                val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -737,12 +487,6 @@ static int goya_early_init(struct hl_device *hdev)
        }
 
        return 0;
-
-disable_device:
-       pci_clear_master(pdev);
-       pci_disable_device(pdev);
-
-       return rc;
 }
 
 /*
@@ -755,14 +499,33 @@ disable_device:
  */
 static int goya_early_fini(struct hl_device *hdev)
 {
-       goya_pci_bars_unmap(hdev);
-
-       pci_clear_master(hdev->pdev);
-       pci_disable_device(hdev->pdev);
+       hl_pci_fini(hdev);
 
        return 0;
 }
 
+static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
+{
+       /* mask to zero the MMBP and ASID bits */
+       WREG32_AND(reg, ~0x7FF);
+       WREG32_OR(reg, asid);
+}
+
+static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
+{
+       struct goya_device *goya = hdev->asic_specific;
+
+       if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+               return;
+
+       if (secure)
+               WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
+       else
+               WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
+
+       RREG32(mmDMA_QM_0_GLBL_PROT);
+}
+
 /*
  * goya_fetch_psoc_frequency - Fetch PSOC frequency values
  *
@@ -789,10 +552,9 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
 static int goya_late_init(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       struct goya_device *goya = hdev->asic_specific;
        int rc;
 
-       rc = goya->armcp_info_get(hdev);
+       rc = goya_armcp_info_get(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to get armcp info\n");
                return rc;
@@ -804,7 +566,7 @@ static int goya_late_init(struct hl_device *hdev)
         */
        WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
 
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+       rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
        if (rc) {
                dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
                return rc;
@@ -830,7 +592,7 @@ static int goya_late_init(struct hl_device *hdev)
        return 0;
 
 disable_pci_access:
-       goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+       hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
 
        return rc;
 }
@@ -879,9 +641,6 @@ static int goya_sw_init(struct hl_device *hdev)
        if (!goya)
                return -ENOMEM;
 
-       goya->test_cpu_queue = goya_test_cpu_queue;
-       goya->armcp_info_get = goya_armcp_info_get;
-
        /* according to goya_init_iatu */
        goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
 
@@ -889,6 +648,9 @@ static int goya_sw_init(struct hl_device *hdev)
        goya->tpc_clk = GOYA_PLL_FREQ_LOW;
        goya->ic_clk = GOYA_PLL_FREQ_LOW;
 
+       goya->mmu_prepare_reg = goya_mmu_prepare_reg;
+       goya->qman0_set_security = goya_qman0_set_security;
+
        hdev->asic_specific = goya;
 
        /* Create DMA pool for small allocations */
@@ -902,19 +664,16 @@ static int goya_sw_init(struct hl_device *hdev)
 
        hdev->cpu_accessible_dma_mem =
                        hdev->asic_funcs->dma_alloc_coherent(hdev,
-                                       CPU_ACCESSIBLE_MEM_SIZE,
+                                       HL_CPU_ACCESSIBLE_MEM_SIZE,
                                        &hdev->cpu_accessible_dma_address,
                                        GFP_KERNEL | __GFP_ZERO);
 
        if (!hdev->cpu_accessible_dma_mem) {
-               dev_err(hdev->dev,
-                       "failed to allocate %d of dma memory for CPU accessible memory space\n",
-                       CPU_ACCESSIBLE_MEM_SIZE);
                rc = -ENOMEM;
                goto free_dma_pool;
        }
 
-       hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
+       hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
        if (!hdev->cpu_accessible_dma_pool) {
                dev_err(hdev->dev,
                        "Failed to create CPU accessible DMA pool\n");
@@ -924,7 +683,7 @@ static int goya_sw_init(struct hl_device *hdev)
 
        rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
                                (uintptr_t) hdev->cpu_accessible_dma_mem,
-                               CPU_ACCESSIBLE_MEM_SIZE, -1);
+                               HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to add memory to CPU accessible DMA pool\n");
@@ -939,7 +698,7 @@ static int goya_sw_init(struct hl_device *hdev)
 free_cpu_pq_pool:
        gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 free_cpu_pq_dma_mem:
-       hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+       hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
                        hdev->cpu_accessible_dma_mem,
                        hdev->cpu_accessible_dma_address);
 free_dma_pool:
@@ -962,7 +721,7 @@ static int goya_sw_fini(struct hl_device *hdev)
 
        gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 
-       hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+       hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
                        hdev->cpu_accessible_dma_mem,
                        hdev->cpu_accessible_dma_address);
 
@@ -1242,7 +1001,7 @@ static int goya_init_cpu_queues(struct hl_device *hdev)
 
        WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
        WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
-       WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);
+       WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
 
        /* Used for EQ CI */
        WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
@@ -1688,14 +1447,15 @@ static void goya_init_golden_registers(struct hl_device *hdev)
 
        /*
         * Workaround for H2 #HW-23 bug
-        * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
-        * to 16 on KMD DMA
-        * We need to limit only these DMAs because the user can only read
+        * Set DMA max outstanding read requests to 240 on DMA CH 1.
+        * This limitation is still large enough to not affect Gen4 bandwidth.
+        * We need to only limit that DMA channel because the user can only read
         * from Host using DMA CH 1
         */
-       WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
        WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
 
+       WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
+
        goya->hw_cap_initialized |= HW_CAP_GOLDEN;
 }
 
@@ -2223,10 +1983,10 @@ static int goya_enable_msix(struct hl_device *hdev)
                }
        }
 
-       irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+       irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
 
        rc = request_irq(irq, hl_irq_handler_eq, 0,
-                       goya_irq_name[EVENT_QUEUE_MSIX_IDX],
+                       goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
                        &hdev->event_queue);
        if (rc) {
                dev_err(hdev->dev, "Failed to request IRQ %d", irq);
@@ -2257,7 +2017,7 @@ static void goya_sync_irqs(struct hl_device *hdev)
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                synchronize_irq(pci_irq_vector(hdev->pdev, i));
 
-       synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
+       synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
 }
 
 static void goya_disable_msix(struct hl_device *hdev)
@@ -2270,7 +2030,7 @@ static void goya_disable_msix(struct hl_device *hdev)
 
        goya_sync_irqs(hdev);
 
-       irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+       irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
        free_irq(irq, &hdev->event_queue);
 
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
@@ -2330,67 +2090,45 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
 }
 
 /*
- * goya_push_fw_to_device - Push FW code to device
- *
- * @hdev: pointer to hl_device structure
+ * goya_push_uboot_to_device() - Push u-boot FW code to device.
+ * @hdev: Pointer to hl_device structure.
  *
- * Copy fw code from firmware file to device memory.
- * Returns 0 on success
+ * Copy u-boot fw code from firmware file to SRAM BAR.
  *
+ * Return: 0 on success, non-zero for failure.
  */
-static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
-                                       void __iomem *dst)
+static int goya_push_uboot_to_device(struct hl_device *hdev)
 {
-       const struct firmware *fw;
-       const u64 *fw_data;
-       size_t fw_size, i;
-       int rc;
-
-       rc = request_firmware(&fw, fw_name, hdev->dev);
-
-       if (rc) {
-               dev_err(hdev->dev, "Failed to request %s\n", fw_name);
-               goto out;
-       }
-
-       fw_size = fw->size;
-       if ((fw_size % 4) != 0) {
-               dev_err(hdev->dev, "illegal %s firmware size %zu\n",
-                       fw_name, fw_size);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
-
-       fw_data = (const u64 *) fw->data;
+       char fw_name[200];
+       void __iomem *dst;
 
-       if ((fw->size % 8) != 0)
-               fw_size -= 8;
+       snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
+       dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
 
-       for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
-               if (!(i & (0x80000 - 1))) {
-                       dev_dbg(hdev->dev,
-                               "copied so far %zu out of %zu for %s firmware",
-                               i, fw_size, fw_name);
-                       usleep_range(20, 100);
-               }
+       return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+}
 
-               writeq(*fw_data, dst);
-       }
+/*
+ * goya_push_linux_to_device() - Push LINUX FW code to device.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Copy LINUX fw code from firmware file to HBM BAR.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int goya_push_linux_to_device(struct hl_device *hdev)
+{
+       char fw_name[200];
+       void __iomem *dst;
 
-       if ((fw->size % 8) != 0)
-               writel(*(const u32 *) fw_data, dst);
+       snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+       dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
 
-out:
-       release_firmware(fw);
-       return rc;
+       return hl_fw_push_fw_to_device(hdev, fw_name, dst);
 }
 
 static int goya_pldm_init_cpu(struct hl_device *hdev)
 {
-       char fw_name[200];
-       void __iomem *dst;
        u32 val, unit_rst_val;
        int rc;
 
@@ -2408,15 +2146,11 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
        WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
        val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
 
-       snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
-       dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
-       rc = goya_push_fw_to_device(hdev, fw_name, dst);
+       rc = goya_push_uboot_to_device(hdev);
        if (rc)
                return rc;
 
-       snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
-       dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
-       rc = goya_push_fw_to_device(hdev, fw_name, dst);
+       rc = goya_push_linux_to_device(hdev);
        if (rc)
                return rc;
 
@@ -2478,8 +2212,6 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
 static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
 {
        struct goya_device *goya = hdev->asic_specific;
-       char fw_name[200];
-       void __iomem *dst;
        u32 status;
        int rc;
 
@@ -2550,6 +2282,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
                                "ARM status %d - DDR initialization failed\n",
                                status);
                        break;
+               case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+                       dev_err(hdev->dev,
+                               "ARM status %d - u-boot stopped by user\n",
+                               status);
+                       break;
                default:
                        dev_err(hdev->dev,
                                "ARM status %d - Invalid status code\n",
@@ -2571,9 +2308,7 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
                goto out;
        }
 
-       snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
-       dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
-       rc = goya_push_fw_to_device(hdev, fw_name, dst);
+       rc = goya_push_linux_to_device(hdev);
        if (rc)
                return rc;
 
@@ -2606,6 +2341,38 @@ out:
        return 0;
 }
 
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+                                               u64 phys_addr)
+{
+       u32 status, timeout_usec;
+       int rc;
+
+       if (hdev->pldm)
+               timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+       else
+               timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+       WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+       WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+       WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
+
+       rc = hl_poll_timeout(
+               hdev,
+               MMU_ASID_BUSY,
+               status,
+               !(status & 0x80000000),
+               1000,
+               timeout_usec);
+
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Timeout during MMU hop0 config of asid %d\n", asid);
+               return rc;
+       }
+
+       return 0;
+}
+
 static int goya_mmu_init(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -2729,28 +2496,16 @@ static int goya_hw_init(struct hl_device *hdev)
                goto disable_msix;
        }
 
-       /* CPU initialization is finished, we can now move to 48 bit DMA mask */
-       rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
-       if (rc) {
-               dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
-               rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_err(hdev->dev,
-                               "Unable to set pci dma mask to 32 bits\n");
-                       goto disable_pci_access;
-               }
-       }
-
-       rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
-       if (rc) {
-               dev_warn(hdev->dev,
-                       "Unable to set pci consistent dma mask to 48 bits\n");
-               rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_err(hdev->dev,
-                               "Unable to set pci consistent dma mask to 32 bits\n");
+       /*
+        * Check if we managed to set the DMA mask to more then 32 bits. If so,
+        * let's try to increase it again because in Goya we set the initial
+        * dma mask to less then 39 bits so that the allocation of the memory
+        * area for the device's cpu will be under 39 bits
+        */
+       if (hdev->dma_mask > 32) {
+               rc = hl_pci_set_dma_mask(hdev, 48);
+               if (rc)
                        goto disable_pci_access;
-               }
        }
 
        /* Perform read from the device to flush all MSI-X configuration */
@@ -2759,7 +2514,7 @@ static int goya_hw_init(struct hl_device *hdev)
        return 0;
 
 disable_pci_access:
-       goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+       hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
 disable_msix:
        goya_disable_msix(hdev);
 disable_queues:
@@ -2866,7 +2621,7 @@ int goya_suspend(struct hl_device *hdev)
 {
        int rc;
 
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+       rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
        if (rc)
                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
 
@@ -3067,6 +2822,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
        dma_addr_t fence_dma_addr;
        struct hl_cb *cb;
        u32 tmp, timeout;
+       char buf[16] = {};
        int rc;
 
        if (hdev->pldm)
@@ -3074,9 +2830,10 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
        else
                timeout = HL_DEVICE_TIMEOUT_USEC;
 
-       if (!hdev->asic_funcs->is_device_idle(hdev)) {
+       if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
                dev_err_ratelimited(hdev->dev,
-                       "Can't send KMD job on QMAN0 if device is not idle\n");
+                       "Can't send KMD job on QMAN0 because %s is busy\n",
+                       buf);
                return -EBUSY;
        }
 
@@ -3090,10 +2847,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
 
        *fence_ptr = 0;
 
-       if (goya->hw_cap_initialized & HW_CAP_MMU) {
-               WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
-               RREG32(mmDMA_QM_0_GLBL_PROT);
-       }
+       goya->qman0_set_security(hdev, true);
 
        /*
         * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
@@ -3135,10 +2889,7 @@ free_fence_ptr:
        hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
                                        fence_dma_addr);
 
-       if (goya->hw_cap_initialized & HW_CAP_MMU) {
-               WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
-               RREG32(mmDMA_QM_0_GLBL_PROT);
-       }
+       goya->qman0_set_security(hdev, false);
 
        return rc;
 }
@@ -3147,10 +2898,6 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
                                u32 timeout, long *result)
 {
        struct goya_device *goya = hdev->asic_specific;
-       struct armcp_packet *pkt;
-       dma_addr_t pkt_dma_addr;
-       u32 tmp;
-       int rc = 0;
 
        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
                if (result)
@@ -3158,74 +2905,8 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
                return 0;
        }
 
-       if (len > CPU_CB_SIZE) {
-               dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
-                       len);
-               return -ENOMEM;
-       }
-
-       pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
-                                                               &pkt_dma_addr);
-       if (!pkt) {
-               dev_err(hdev->dev,
-                       "Failed to allocate DMA memory for packet to CPU\n");
-               return -ENOMEM;
-       }
-
-       memcpy(pkt, msg, len);
-
-       mutex_lock(&hdev->send_cpu_message_lock);
-
-       if (hdev->disabled)
-               goto out;
-
-       if (hdev->device_cpu_disabled) {
-               rc = -EIO;
-               goto out;
-       }
-
-       rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
-                       pkt_dma_addr);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
-               goto out;
-       }
-
-       rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
-                                       timeout, &tmp);
-
-       hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
-
-       if (rc == -ETIMEDOUT) {
-               dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
-               hdev->device_cpu_disabled = true;
-               goto out;
-       }
-
-       if (tmp == ARMCP_PACKET_FENCE_VAL) {
-               u32 ctl = le32_to_cpu(pkt->ctl);
-
-               rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
-               if (rc) {
-                       dev_err(hdev->dev,
-                               "F/W ERROR %d for CPU packet %d\n",
-                               rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
-                                               >> ARMCP_PKT_CTL_OPCODE_SHIFT);
-                       rc = -EINVAL;
-               } else if (result) {
-                       *result = (long) le64_to_cpu(pkt->result);
-               }
-       } else {
-               dev_err(hdev->dev, "CPU packet wrong fence value\n");
-               rc = -EINVAL;
-       }
-
-out:
-       mutex_unlock(&hdev->send_cpu_message_lock);
-
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
-
-       return rc;
+       return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
+                                       timeout, result);
 }
 
 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
@@ -3303,38 +2984,20 @@ free_fence_ptr:
 
 int goya_test_cpu_queue(struct hl_device *hdev)
 {
-       struct armcp_packet test_pkt;
-       long result;
-       int rc;
-
-       /* cpu_queues_enable flag is always checked in send cpu message */
-
-       memset(&test_pkt, 0, sizeof(test_pkt));
-
-       test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
-                                       ARMCP_PKT_CTL_OPCODE_SHIFT);
-       test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
-
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
-                       sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+       struct goya_device *goya = hdev->asic_specific;
 
-       if (!rc) {
-               if (result == ARMCP_PACKET_FENCE_VAL)
-                       dev_info(hdev->dev,
-                               "queue test on CPU queue succeeded\n");
-               else
-                       dev_err(hdev->dev,
-                               "CPU queue test failed (0x%08lX)\n", result);
-       } else {
-               dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
-       }
+       /*
+        * check capability here as send_cpu_message() won't update the result
+        * value if no capability
+        */
+       if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+               return 0;
 
-       return rc;
+       return hl_fw_test_cpu_queue(hdev);
 }
 
-static int goya_test_queues(struct hl_device *hdev)
+int goya_test_queues(struct hl_device *hdev)
 {
-       struct goya_device *goya = hdev->asic_specific;
        int i, rc, ret_val = 0;
 
        for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
@@ -3344,7 +3007,7 @@ static int goya_test_queues(struct hl_device *hdev)
        }
 
        if (hdev->cpu_queues_enable) {
-               rc = goya->test_cpu_queue(hdev);
+               rc = goya_test_cpu_queue(hdev);
                if (rc)
                        ret_val = -EINVAL;
        }
@@ -3367,30 +3030,16 @@ static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
        dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
 }
 
-static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
-                                       size_t size, dma_addr_t *dma_handle)
+void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+                                       dma_addr_t *dma_handle)
 {
-       u64 kernel_addr;
-
-       /* roundup to CPU_PKT_SIZE */
-       size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
-
-       kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
-
-       *dma_handle = hdev->cpu_accessible_dma_address +
-               (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
-
-       return (void *) (uintptr_t) kernel_addr;
+       return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
 }
 
-static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
-                                               size_t size, void *vaddr)
+void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+                                       void *vaddr)
 {
-       /* roundup to CPU_PKT_SIZE */
-       size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
-
-       gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
-                       size);
+       hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
 }
 
 static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
@@ -3693,7 +3342,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
         * WA for HW-23.
         * We can't allow user to read from Host using QMANs other than 1.
         */
-       if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+       if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
                hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
                                le32_to_cpu(user_dma_pkt->tsize),
                                hdev->asic_prop.va_space_host_start_address,
@@ -4408,6 +4057,9 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
 {
        struct goya_device *goya = hdev->asic_specific;
 
+       if (hdev->hard_reset_pending)
+               return U64_MAX;
+
        return readq(hdev->pcie_bar[DDR_BAR_ID] +
                        (addr - goya->ddr_bar_cur_addr));
 }
@@ -4416,6 +4068,9 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
 {
        struct goya_device *goya = hdev->asic_specific;
 
+       if (hdev->hard_reset_pending)
+               return;
+
        writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
                        (addr - goya->ddr_bar_cur_addr));
 }
@@ -4605,8 +4260,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
        pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
                                                ARMCP_PKT_CTL_OPCODE_SHIFT);
 
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
-                       total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+       rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
+                       HL_DEVICE_TIMEOUT_USEC, &result);
 
        if (rc)
                dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4622,8 +4277,8 @@ static int goya_soft_reset_late_init(struct hl_device *hdev)
         * Unmask all IRQs since some could have been received
         * during the soft reset
         */
-       return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
-                       sizeof(goya_non_fatal_events));
+       return goya_unmask_irq_arr(hdev, goya_all_events,
+                                       sizeof(goya_all_events));
 }
 
 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
@@ -4638,7 +4293,7 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
                                ARMCP_PKT_CTL_OPCODE_SHIFT);
        pkt.value = cpu_to_le64(event_type);
 
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+       rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                        HL_DEVICE_TIMEOUT_USEC, &result);
 
        if (rc)
@@ -4854,12 +4509,13 @@ static int goya_context_switch(struct hl_device *hdev, u32 asid)
                return rc;
        }
 
+       WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
        goya_mmu_prepare(hdev, asid);
 
        return 0;
 }
 
-static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
+int goya_mmu_clear_pgt_range(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct goya_device *goya = hdev->asic_specific;
@@ -4873,7 +4529,7 @@ static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
        return goya_memset_device_memory(hdev, addr, size, 0, true);
 }
 
-static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
+int goya_mmu_set_dram_default_page(struct hl_device *hdev)
 {
        struct goya_device *goya = hdev->asic_specific;
        u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
@@ -4886,7 +4542,7 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
        return goya_memset_device_memory(hdev, addr, size, val, true);
 }
 
-static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
+void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
 {
        struct goya_device *goya = hdev->asic_specific;
        int i;
@@ -4900,10 +4556,8 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
        }
 
        /* zero the MMBP and ASID bits and then set the ASID */
-       for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
-               WREG32_AND(goya_mmu_regs[i], ~0x7FF);
-               WREG32_OR(goya_mmu_regs[i], asid);
-       }
+       for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
+               goya->mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
 }
 
 static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
@@ -4994,107 +4648,29 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
                        "Timeout when waiting for MMU cache invalidation\n");
 }
 
-static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
-                                               u64 phys_addr)
-{
-       u32 status, timeout_usec;
-       int rc;
-
-       if (hdev->pldm)
-               timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
-       else
-               timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-
-       WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
-       WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
-       WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
-
-       rc = hl_poll_timeout(
-               hdev,
-               MMU_ASID_BUSY,
-               status,
-               !(status & 0x80000000),
-               1000,
-               timeout_usec);
-
-       if (rc) {
-               dev_err(hdev->dev,
-                       "Timeout during MMU hop0 config of asid %d\n", asid);
-               return rc;
-       }
-
-       return 0;
-}
-
 int goya_send_heartbeat(struct hl_device *hdev)
 {
        struct goya_device *goya = hdev->asic_specific;
-       struct armcp_packet hb_pkt;
-       long result;
-       int rc;
 
        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
                return 0;
 
-       memset(&hb_pkt, 0, sizeof(hb_pkt));
-
-       hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
-                                       ARMCP_PKT_CTL_OPCODE_SHIFT);
-       hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
-
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
-                       sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
-
-       if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
-               rc = -EIO;
-
-       return rc;
+       return hl_fw_send_heartbeat(hdev);
 }
 
-static int goya_armcp_info_get(struct hl_device *hdev)
+int goya_armcp_info_get(struct hl_device *hdev)
 {
        struct goya_device *goya = hdev->asic_specific;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       struct armcp_packet pkt;
-       void *armcp_info_cpu_addr;
-       dma_addr_t armcp_info_dma_addr;
        u64 dram_size;
-       long result;
        int rc;
 
        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
                return 0;
 
-       armcp_info_cpu_addr =
-                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                       sizeof(struct armcp_info), &armcp_info_dma_addr);
-       if (!armcp_info_cpu_addr) {
-               dev_err(hdev->dev,
-                       "Failed to allocate DMA memory for ArmCP info packet\n");
-               return -ENOMEM;
-       }
-
-       memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
-
-       memset(&pkt, 0, sizeof(pkt));
-
-       pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
-                               ARMCP_PKT_CTL_OPCODE_SHIFT);
-       pkt.addr = cpu_to_le64(armcp_info_dma_addr +
-                               prop->host_phys_base_address);
-       pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
-
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-                       GOYA_ARMCP_INFO_TIMEOUT, &result);
-
-       if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to send armcp info pkt, error %d\n", rc);
-               goto out;
-       }
-
-       memcpy(&prop->armcp_info, armcp_info_cpu_addr,
-                       sizeof(prop->armcp_info));
+       rc = hl_fw_armcp_info_get(hdev);
+       if (rc)
+               return rc;
 
        dram_size = le64_to_cpu(prop->armcp_info.dram_size);
        if (dram_size) {
@@ -5110,32 +4686,10 @@ static int goya_armcp_info_get(struct hl_device *hdev)
                prop->dram_end_address = prop->dram_base_address + dram_size;
        }
 
-       rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
-       if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to build hwmon channel info, error %d\n", rc);
-               rc = -EFAULT;
-               goto out;
-       }
-
-out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                       sizeof(struct armcp_info), armcp_info_cpu_addr);
-
-       return rc;
-}
-
-static void goya_init_clock_gating(struct hl_device *hdev)
-{
-
-}
-
-static void goya_disable_clock_gating(struct hl_device *hdev)
-{
-
+       return 0;
 }
 
-static bool goya_is_device_idle(struct hl_device *hdev)
+static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
 {
        u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
        int i;
@@ -5147,7 +4701,7 @@ static bool goya_is_device_idle(struct hl_device *hdev)
 
                if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
                                DMA_QM_IDLE_MASK)
-                       return false;
+                       return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
        }
 
        offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
@@ -5159,31 +4713,31 @@ static bool goya_is_device_idle(struct hl_device *hdev)
 
                if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
                                TPC_QM_IDLE_MASK)
-                       return false;
+                       return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);
 
                if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
                                TPC_CMDQ_IDLE_MASK)
-                       return false;
+                       return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);
 
                if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
                                TPC_CFG_IDLE_MASK)
-                       return false;
+                       return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
        }
 
        if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
                        MME_QM_IDLE_MASK)
-               return false;
+               return HL_ENG_BUSY(buf, size, "MME_QM");
 
        if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
                        MME_CMDQ_IDLE_MASK)
-               return false;
+               return HL_ENG_BUSY(buf, size, "MME_CMDQ");
 
        if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
                        MME_ARCH_IDLE_MASK)
-               return false;
+               return HL_ENG_BUSY(buf, size, "MME_ARCH");
 
        if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
-               return false;
+               return HL_ENG_BUSY(buf, size, "MME");
 
        return true;
 }
@@ -5211,52 +4765,11 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
                                size_t max_size)
 {
        struct goya_device *goya = hdev->asic_specific;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       struct armcp_packet pkt;
-       void *eeprom_info_cpu_addr;
-       dma_addr_t eeprom_info_dma_addr;
-       long result;
-       int rc;
 
        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
                return 0;
 
-       eeprom_info_cpu_addr =
-                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                       max_size, &eeprom_info_dma_addr);
-       if (!eeprom_info_cpu_addr) {
-               dev_err(hdev->dev,
-                       "Failed to allocate DMA memory for EEPROM info packet\n");
-               return -ENOMEM;
-       }
-
-       memset(eeprom_info_cpu_addr, 0, max_size);
-
-       memset(&pkt, 0, sizeof(pkt));
-
-       pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
-                               ARMCP_PKT_CTL_OPCODE_SHIFT);
-       pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
-                               prop->host_phys_base_address);
-       pkt.data_max_size = cpu_to_le32(max_size);
-
-       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-                       GOYA_ARMCP_EEPROM_TIMEOUT, &result);
-
-       if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to send armcp EEPROM pkt, error %d\n", rc);
-               goto out;
-       }
-
-       /* result contains the actual size */
-       memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
-
-out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
-                       eeprom_info_cpu_addr);
-
-       return rc;
+       return hl_fw_get_eeprom_data(hdev, data, max_size);
 }
 
 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
@@ -5306,8 +4819,7 @@ static const struct hl_asic_funcs goya_funcs = {
        .mmu_invalidate_cache = goya_mmu_invalidate_cache,
        .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
        .send_heartbeat = goya_send_heartbeat,
-       .enable_clock_gating = goya_init_clock_gating,
-       .disable_clock_gating = goya_disable_clock_gating,
+       .debug_coresight = goya_debug_coresight,
        .is_device_idle = goya_is_device_idle,
        .soft_reset_late_init = goya_soft_reset_late_init,
        .hw_queues_lock = goya_hw_queues_lock,
@@ -5315,7 +4827,10 @@ static const struct hl_asic_funcs goya_funcs = {
        .get_pci_id = goya_get_pci_id,
        .get_eeprom_data = goya_get_eeprom_data,
        .send_cpu_message = goya_send_cpu_message,
-       .get_hw_state = goya_get_hw_state
+       .get_hw_state = goya_get_hw_state,
+       .pci_bars_map = goya_pci_bars_map,
+       .set_dram_bar_base = goya_set_ddr_bar_base,
+       .init_iatu = goya_init_iatu
 };
 
 /*
index 830551b6b0620242e9bdbaee0bcb2e18ba9cff1d..b572e0263ac58cf8e649fa948ff1e4e8d0ebea5f 100644 (file)
 #error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
 #endif
 
-#define QMAN_FENCE_TIMEOUT_USEC                10000   /* 10 ms */
+#define QMAN_FENCE_TIMEOUT_USEC                10000           /* 10 ms */
 
-#define QMAN_STOP_TIMEOUT_USEC         100000  /* 100 ms */
+#define QMAN_STOP_TIMEOUT_USEC         100000          /* 100 ms */
+
+#define CORESIGHT_TIMEOUT_USEC         100000          /* 100 ms */
+
+#define GOYA_CPU_TIMEOUT_USEC          10000000        /* 10s */
 
 #define TPC_ENABLED_MASK               0xFF
 
 
 #define MAX_POWER_DEFAULT              200000          /* 200W */
 
-#define GOYA_ARMCP_INFO_TIMEOUT                10000000        /* 10s */
-#define GOYA_ARMCP_EEPROM_TIMEOUT      10000000        /* 10s */
-
 #define DRAM_PHYS_DEFAULT_SIZE         0x100000000ull  /* 4GB */
 
 /* DRAM Memory Map */
 
 #define CPU_FW_IMAGE_SIZE              0x10000000      /* 256MB */
-#define MMU_PAGE_TABLES_SIZE           0x0DE00000      /* 222MB */
+#define MMU_PAGE_TABLES_SIZE           0x0FC00000      /* 252MB */
 #define MMU_DRAM_DEFAULT_PAGE_SIZE     0x00200000      /* 2MB */
 #define MMU_CACHE_MNG_SIZE             0x00001000      /* 4KB */
-#define CPU_PQ_PKT_SIZE                        0x00001000      /* 4KB */
-#define CPU_PQ_DATA_SIZE               0x01FFE000      /* 32MB - 8KB  */
 
 #define CPU_FW_IMAGE_ADDR              DRAM_PHYS_BASE
 #define MMU_PAGE_TABLES_ADDR           (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
                                                MMU_PAGE_TABLES_SIZE)
 #define MMU_CACHE_MNG_ADDR             (MMU_DRAM_DEFAULT_PAGE_ADDR + \
                                        MMU_DRAM_DEFAULT_PAGE_SIZE)
-#define CPU_PQ_PKT_ADDR                        (MMU_CACHE_MNG_ADDR + \
+#define DRAM_KMD_END_ADDR              (MMU_CACHE_MNG_ADDR + \
                                                MMU_CACHE_MNG_SIZE)
-#define CPU_PQ_DATA_ADDR               (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
-#define DRAM_BASE_ADDR_USER            (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
 
-#if (DRAM_BASE_ADDR_USER != 0x20000000)
-#error "KMD must reserve 512MB"
+#define DRAM_BASE_ADDR_USER            0x20000000
+
+#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "KMD must reserve no more than 512MB"
 #endif
 
 /*
 #define HW_CAP_GOLDEN          0x00000400
 #define HW_CAP_TPC             0x00000800
 
-#define CPU_PKT_SHIFT          5
-#define CPU_PKT_SIZE           (1 << CPU_PKT_SHIFT)
-#define CPU_PKT_MASK           (~((1 << CPU_PKT_SHIFT) - 1))
-#define CPU_MAX_PKTS_IN_CB     32
-#define CPU_CB_SIZE            (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
-#define CPU_ACCESSIBLE_MEM_SIZE        (HL_QUEUE_LENGTH * CPU_CB_SIZE)
-
 enum goya_fw_component {
        FW_COMP_UBOOT,
        FW_COMP_PREBOOT
 };
 
 struct goya_device {
-       int (*test_cpu_queue)(struct hl_device *hdev);
-       int (*armcp_info_get)(struct hl_device *hdev);
+       void (*mmu_prepare_reg)(struct hl_device *hdev, u64 reg, u32 asid);
+       void (*qman0_set_security)(struct hl_device *hdev, bool secure);
 
        /* TODO: remove hw_queues_lock after moving to scheduler code */
        spinlock_t      hw_queues_lock;
@@ -188,11 +180,16 @@ void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
 void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
 void goya_add_device_attr(struct hl_device *hdev,
                        struct attribute_group *dev_attr_grp);
+int goya_armcp_info_get(struct hl_device *hdev);
 void goya_init_security(struct hl_device *hdev);
+int goya_debug_coresight(struct hl_device *hdev, void *data);
 u64 goya_get_max_power(struct hl_device *hdev);
 void goya_set_max_power(struct hl_device *hdev, u64 value);
+int goya_test_queues(struct hl_device *hdev);
+void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
+int goya_mmu_clear_pgt_range(struct hl_device *hdev);
+int goya_mmu_set_dram_default_page(struct hl_device *hdev);
 
-int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
 void goya_late_fini(struct hl_device *hdev);
 int goya_suspend(struct hl_device *hdev);
 int goya_resume(struct hl_device *hdev);
@@ -207,5 +204,9 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
 int goya_send_heartbeat(struct hl_device *hdev);
+void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+                                       dma_addr_t *dma_handle);
+void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+                                       void *vaddr);
 
 #endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
new file mode 100644 (file)
index 0000000..68726fb
--- /dev/null
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+#include "include/goya/goya_coresight.h"
+#include "include/goya/asic_reg/goya_regs.h"
+
+#include <uapi/misc/habanalabs.h>
+
+#include <linux/coresight.h>
+
+#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC       (CORESIGHT_TIMEOUT_USEC * 100)
+
+static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
+       [GOYA_STM_CPU]          = mmCPU_STM_BASE,
+       [GOYA_STM_DMA_CH_0_CS]  = mmDMA_CH_0_CS_STM_BASE,
+       [GOYA_STM_DMA_CH_1_CS]  = mmDMA_CH_1_CS_STM_BASE,
+       [GOYA_STM_DMA_CH_2_CS]  = mmDMA_CH_2_CS_STM_BASE,
+       [GOYA_STM_DMA_CH_3_CS]  = mmDMA_CH_3_CS_STM_BASE,
+       [GOYA_STM_DMA_CH_4_CS]  = mmDMA_CH_4_CS_STM_BASE,
+       [GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE,
+       [GOYA_STM_MME1_SBA]     = mmMME1_SBA_STM_BASE,
+       [GOYA_STM_MME3_SBB]     = mmMME3_SBB_STM_BASE,
+       [GOYA_STM_MME4_WACS2]   = mmMME4_WACS2_STM_BASE,
+       [GOYA_STM_MME4_WACS]    = mmMME4_WACS_STM_BASE,
+       [GOYA_STM_MMU_CS]       = mmMMU_CS_STM_BASE,
+       [GOYA_STM_PCIE]         = mmPCIE_STM_BASE,
+       [GOYA_STM_PSOC]         = mmPSOC_STM_BASE,
+       [GOYA_STM_TPC0_EML]     = mmTPC0_EML_STM_BASE,
+       [GOYA_STM_TPC1_EML]     = mmTPC1_EML_STM_BASE,
+       [GOYA_STM_TPC2_EML]     = mmTPC2_EML_STM_BASE,
+       [GOYA_STM_TPC3_EML]     = mmTPC3_EML_STM_BASE,
+       [GOYA_STM_TPC4_EML]     = mmTPC4_EML_STM_BASE,
+       [GOYA_STM_TPC5_EML]     = mmTPC5_EML_STM_BASE,
+       [GOYA_STM_TPC6_EML]     = mmTPC6_EML_STM_BASE,
+       [GOYA_STM_TPC7_EML]     = mmTPC7_EML_STM_BASE
+};
+
+static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = {
+       [GOYA_ETF_CPU_0]        = mmCPU_ETF_0_BASE,
+       [GOYA_ETF_CPU_1]        = mmCPU_ETF_1_BASE,
+       [GOYA_ETF_CPU_TRACE]    = mmCPU_ETF_TRACE_BASE,
+       [GOYA_ETF_DMA_CH_0_CS]  = mmDMA_CH_0_CS_ETF_BASE,
+       [GOYA_ETF_DMA_CH_1_CS]  = mmDMA_CH_1_CS_ETF_BASE,
+       [GOYA_ETF_DMA_CH_2_CS]  = mmDMA_CH_2_CS_ETF_BASE,
+       [GOYA_ETF_DMA_CH_3_CS]  = mmDMA_CH_3_CS_ETF_BASE,
+       [GOYA_ETF_DMA_CH_4_CS]  = mmDMA_CH_4_CS_ETF_BASE,
+       [GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE,
+       [GOYA_ETF_MME1_SBA]     = mmMME1_SBA_ETF_BASE,
+       [GOYA_ETF_MME3_SBB]     = mmMME3_SBB_ETF_BASE,
+       [GOYA_ETF_MME4_WACS2]   = mmMME4_WACS2_ETF_BASE,
+       [GOYA_ETF_MME4_WACS]    = mmMME4_WACS_ETF_BASE,
+       [GOYA_ETF_MMU_CS]       = mmMMU_CS_ETF_BASE,
+       [GOYA_ETF_PCIE]         = mmPCIE_ETF_BASE,
+       [GOYA_ETF_PSOC]         = mmPSOC_ETF_BASE,
+       [GOYA_ETF_TPC0_EML]     = mmTPC0_EML_ETF_BASE,
+       [GOYA_ETF_TPC1_EML]     = mmTPC1_EML_ETF_BASE,
+       [GOYA_ETF_TPC2_EML]     = mmTPC2_EML_ETF_BASE,
+       [GOYA_ETF_TPC3_EML]     = mmTPC3_EML_ETF_BASE,
+       [GOYA_ETF_TPC4_EML]     = mmTPC4_EML_ETF_BASE,
+       [GOYA_ETF_TPC5_EML]     = mmTPC5_EML_ETF_BASE,
+       [GOYA_ETF_TPC6_EML]     = mmTPC6_EML_ETF_BASE,
+       [GOYA_ETF_TPC7_EML]     = mmTPC7_EML_ETF_BASE
+};
+
+static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = {
+       [GOYA_FUNNEL_CPU]               = mmCPU_FUNNEL_BASE,
+       [GOYA_FUNNEL_DMA_CH_6_1]        = mmDMA_CH_FUNNEL_6_1_BASE,
+       [GOYA_FUNNEL_DMA_MACRO_3_1]     = mmDMA_MACRO_FUNNEL_3_1_BASE,
+       [GOYA_FUNNEL_MME0_RTR]          = mmMME0_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_MME1_RTR]          = mmMME1_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_MME2_RTR]          = mmMME2_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_MME3_RTR]          = mmMME3_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_MME4_RTR]          = mmMME4_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_MME5_RTR]          = mmMME5_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_PCIE]              = mmPCIE_FUNNEL_BASE,
+       [GOYA_FUNNEL_PSOC]              = mmPSOC_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC0_EML]          = mmTPC0_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC1_EML]          = mmTPC1_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC1_RTR]          = mmTPC1_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC2_EML]          = mmTPC2_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC2_RTR]          = mmTPC2_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC3_EML]          = mmTPC3_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC3_RTR]          = mmTPC3_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC4_EML]          = mmTPC4_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC4_RTR]          = mmTPC4_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC5_EML]          = mmTPC5_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC5_RTR]          = mmTPC5_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC6_EML]          = mmTPC6_EML_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC6_RTR]          = mmTPC6_RTR_FUNNEL_BASE,
+       [GOYA_FUNNEL_TPC7_EML]          = mmTPC7_EML_FUNNEL_BASE
+};
+
+static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = {
+       [GOYA_BMON_CPU_RD]              = mmCPU_RD_BMON_BASE,
+       [GOYA_BMON_CPU_WR]              = mmCPU_WR_BMON_BASE,
+       [GOYA_BMON_DMA_CH_0_0]          = mmDMA_CH_0_BMON_0_BASE,
+       [GOYA_BMON_DMA_CH_0_1]          = mmDMA_CH_0_BMON_1_BASE,
+       [GOYA_BMON_DMA_CH_1_0]          = mmDMA_CH_1_BMON_0_BASE,
+       [GOYA_BMON_DMA_CH_1_1]          = mmDMA_CH_1_BMON_1_BASE,
+       [GOYA_BMON_DMA_CH_2_0]          = mmDMA_CH_2_BMON_0_BASE,
+       [GOYA_BMON_DMA_CH_2_1]          = mmDMA_CH_2_BMON_1_BASE,
+       [GOYA_BMON_DMA_CH_3_0]          = mmDMA_CH_3_BMON_0_BASE,
+       [GOYA_BMON_DMA_CH_3_1]          = mmDMA_CH_3_BMON_1_BASE,
+       [GOYA_BMON_DMA_CH_4_0]          = mmDMA_CH_4_BMON_0_BASE,
+       [GOYA_BMON_DMA_CH_4_1]          = mmDMA_CH_4_BMON_1_BASE,
+       [GOYA_BMON_DMA_MACRO_0]         = mmDMA_MACRO_BMON_0_BASE,
+       [GOYA_BMON_DMA_MACRO_1]         = mmDMA_MACRO_BMON_1_BASE,
+       [GOYA_BMON_DMA_MACRO_2]         = mmDMA_MACRO_BMON_2_BASE,
+       [GOYA_BMON_DMA_MACRO_3]         = mmDMA_MACRO_BMON_3_BASE,
+       [GOYA_BMON_DMA_MACRO_4]         = mmDMA_MACRO_BMON_4_BASE,
+       [GOYA_BMON_DMA_MACRO_5]         = mmDMA_MACRO_BMON_5_BASE,
+       [GOYA_BMON_DMA_MACRO_6]         = mmDMA_MACRO_BMON_6_BASE,
+       [GOYA_BMON_DMA_MACRO_7]         = mmDMA_MACRO_BMON_7_BASE,
+       [GOYA_BMON_MME1_SBA_0]          = mmMME1_SBA_BMON0_BASE,
+       [GOYA_BMON_MME1_SBA_1]          = mmMME1_SBA_BMON1_BASE,
+       [GOYA_BMON_MME3_SBB_0]          = mmMME3_SBB_BMON0_BASE,
+       [GOYA_BMON_MME3_SBB_1]          = mmMME3_SBB_BMON1_BASE,
+       [GOYA_BMON_MME4_WACS2_0]        = mmMME4_WACS2_BMON0_BASE,
+       [GOYA_BMON_MME4_WACS2_1]        = mmMME4_WACS2_BMON1_BASE,
+       [GOYA_BMON_MME4_WACS2_2]        = mmMME4_WACS2_BMON2_BASE,
+       [GOYA_BMON_MME4_WACS_0]         = mmMME4_WACS_BMON0_BASE,
+       [GOYA_BMON_MME4_WACS_1]         = mmMME4_WACS_BMON1_BASE,
+       [GOYA_BMON_MME4_WACS_2]         = mmMME4_WACS_BMON2_BASE,
+       [GOYA_BMON_MME4_WACS_3]         = mmMME4_WACS_BMON3_BASE,
+       [GOYA_BMON_MME4_WACS_4]         = mmMME4_WACS_BMON4_BASE,
+       [GOYA_BMON_MME4_WACS_5]         = mmMME4_WACS_BMON5_BASE,
+       [GOYA_BMON_MME4_WACS_6]         = mmMME4_WACS_BMON6_BASE,
+       [GOYA_BMON_MMU_0]               = mmMMU_BMON_0_BASE,
+       [GOYA_BMON_MMU_1]               = mmMMU_BMON_1_BASE,
+       [GOYA_BMON_PCIE_MSTR_RD]        = mmPCIE_BMON_MSTR_RD_BASE,
+       [GOYA_BMON_PCIE_MSTR_WR]        = mmPCIE_BMON_MSTR_WR_BASE,
+       [GOYA_BMON_PCIE_SLV_RD]         = mmPCIE_BMON_SLV_RD_BASE,
+       [GOYA_BMON_PCIE_SLV_WR]         = mmPCIE_BMON_SLV_WR_BASE,
+       [GOYA_BMON_TPC0_EML_0]          = mmTPC0_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC0_EML_1]          = mmTPC0_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC0_EML_2]          = mmTPC0_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC0_EML_3]          = mmTPC0_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC1_EML_0]          = mmTPC1_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC1_EML_1]          = mmTPC1_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC1_EML_2]          = mmTPC1_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC1_EML_3]          = mmTPC1_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC2_EML_0]          = mmTPC2_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC2_EML_1]          = mmTPC2_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC2_EML_2]          = mmTPC2_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC2_EML_3]          = mmTPC2_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC3_EML_0]          = mmTPC3_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC3_EML_1]          = mmTPC3_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC3_EML_2]          = mmTPC3_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC3_EML_3]          = mmTPC3_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC4_EML_0]          = mmTPC4_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC4_EML_1]          = mmTPC4_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC4_EML_2]          = mmTPC4_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC4_EML_3]          = mmTPC4_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC5_EML_0]          = mmTPC5_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC5_EML_1]          = mmTPC5_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC5_EML_2]          = mmTPC5_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC5_EML_3]          = mmTPC5_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC6_EML_0]          = mmTPC6_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC6_EML_1]          = mmTPC6_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC6_EML_2]          = mmTPC6_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC6_EML_3]          = mmTPC6_EML_BUSMON_3_BASE,
+       [GOYA_BMON_TPC7_EML_0]          = mmTPC7_EML_BUSMON_0_BASE,
+       [GOYA_BMON_TPC7_EML_1]          = mmTPC7_EML_BUSMON_1_BASE,
+       [GOYA_BMON_TPC7_EML_2]          = mmTPC7_EML_BUSMON_2_BASE,
+       [GOYA_BMON_TPC7_EML_3]          = mmTPC7_EML_BUSMON_3_BASE
+};
+
+static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = {
+       [GOYA_SPMU_DMA_CH_0_CS]         = mmDMA_CH_0_CS_SPMU_BASE,
+       [GOYA_SPMU_DMA_CH_1_CS]         = mmDMA_CH_1_CS_SPMU_BASE,
+       [GOYA_SPMU_DMA_CH_2_CS]         = mmDMA_CH_2_CS_SPMU_BASE,
+       [GOYA_SPMU_DMA_CH_3_CS]         = mmDMA_CH_3_CS_SPMU_BASE,
+       [GOYA_SPMU_DMA_CH_4_CS]         = mmDMA_CH_4_CS_SPMU_BASE,
+       [GOYA_SPMU_DMA_MACRO_CS]        = mmDMA_MACRO_CS_SPMU_BASE,
+       [GOYA_SPMU_MME1_SBA]            = mmMME1_SBA_SPMU_BASE,
+       [GOYA_SPMU_MME3_SBB]            = mmMME3_SBB_SPMU_BASE,
+       [GOYA_SPMU_MME4_WACS2]          = mmMME4_WACS2_SPMU_BASE,
+       [GOYA_SPMU_MME4_WACS]           = mmMME4_WACS_SPMU_BASE,
+       [GOYA_SPMU_MMU_CS]              = mmMMU_CS_SPMU_BASE,
+       [GOYA_SPMU_PCIE]                = mmPCIE_SPMU_BASE,
+       [GOYA_SPMU_TPC0_EML]            = mmTPC0_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC1_EML]            = mmTPC1_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC2_EML]            = mmTPC2_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC3_EML]            = mmTPC3_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC4_EML]            = mmTPC4_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC5_EML]            = mmTPC5_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC6_EML]            = mmTPC6_EML_SPMU_BASE,
+       [GOYA_SPMU_TPC7_EML]            = mmTPC7_EML_SPMU_BASE
+};
+
+static int goya_coresight_timeout(struct hl_device *hdev, u64 addr,
+               int position, bool up)
+{
+       int rc;
+       u32 val, timeout_usec;
+
+       if (hdev->pldm)
+               timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC;
+       else
+               timeout_usec = CORESIGHT_TIMEOUT_USEC;
+
+       rc = hl_poll_timeout(
+               hdev,
+               addr,
+               val,
+               up ? val & BIT(position) : !(val & BIT(position)),
+               1000,
+               timeout_usec);
+
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
+                               addr, position, up);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int goya_config_stm(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       struct hl_debug_params_stm *input;
+       u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+       int rc;
+
+       WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+       if (params->enable) {
+               input = params->input;
+
+               if (!input)
+                       return -EINVAL;
+
+               WREG32(base_reg + 0xE80, 0x80004);
+               WREG32(base_reg + 0xD64, 7);
+               WREG32(base_reg + 0xD60, 0);
+               WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
+               WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask));
+               WREG32(base_reg + 0xD60, 1);
+               WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
+               WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask));
+               WREG32(base_reg + 0xE70, 0x10);
+               WREG32(base_reg + 0xE60, 0);
+               WREG32(base_reg + 0xE64, 0x420000);
+               WREG32(base_reg + 0xE00, 0xFFFFFFFF);
+               WREG32(base_reg + 0xE20, 0xFFFFFFFF);
+               WREG32(base_reg + 0xEF4, input->id);
+               WREG32(base_reg + 0xDF4, 0x80);
+               WREG32(base_reg + 0xE8C, input->frequency);
+               WREG32(base_reg + 0xE90, 0x7FF);
+               WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
+       } else {
+               WREG32(base_reg + 0xE80, 4);
+               WREG32(base_reg + 0xD64, 0);
+               WREG32(base_reg + 0xD60, 1);
+               WREG32(base_reg + 0xD00, 0);
+               WREG32(base_reg + 0xD20, 0);
+               WREG32(base_reg + 0xD60, 0);
+               WREG32(base_reg + 0xE20, 0);
+               WREG32(base_reg + 0xE00, 0);
+               WREG32(base_reg + 0xDF4, 0x80);
+               WREG32(base_reg + 0xE70, 0);
+               WREG32(base_reg + 0xE60, 0);
+               WREG32(base_reg + 0xE64, 0);
+               WREG32(base_reg + 0xE8C, 0);
+
+               rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
+               if (rc) {
+                       dev_err(hdev->dev,
+                               "Failed to disable STM on timeout, error %d\n",
+                               rc);
+                       return rc;
+               }
+
+               WREG32(base_reg + 0xE80, 4);
+       }
+
+       return 0;
+}
+
+static int goya_config_etf(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       struct hl_debug_params_etf *input;
+       u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+       u32 val;
+       int rc;
+
+       WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+       val = RREG32(base_reg + 0x304);
+       val |= 0x1000;
+       WREG32(base_reg + 0x304, val);
+       val |= 0x40;
+       WREG32(base_reg + 0x304, val);
+
+       rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to %s ETF on timeout, error %d\n",
+                               params->enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to %s ETF on timeout, error %d\n",
+                               params->enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       WREG32(base_reg + 0x20, 0);
+
+       if (params->enable) {
+               input = params->input;
+
+               if (!input)
+                       return -EINVAL;
+
+               WREG32(base_reg + 0x34, 0x3FFC);
+               WREG32(base_reg + 0x28, input->sink_mode);
+               WREG32(base_reg + 0x304, 0x4001);
+               WREG32(base_reg + 0x308, 0xA);
+               WREG32(base_reg + 0x20, 1);
+       } else {
+               WREG32(base_reg + 0x34, 0);
+               WREG32(base_reg + 0x28, 0);
+               WREG32(base_reg + 0x304, 0);
+       }
+
+       return 0;
+}
+
+static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
+               u32 size)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 range_start, range_end;
+
+       if (hdev->mmu_enable) {
+               range_start = prop->va_space_dram_start_address;
+               range_end = prop->va_space_dram_end_address;
+       } else {
+               range_start = prop->dram_user_base_address;
+               range_end = prop->dram_end_address;
+       }
+
+       return hl_mem_area_inside_range(addr, size, range_start, range_end);
+}
+
+static int goya_config_etr(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       struct hl_debug_params_etr *input;
+       u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
+       u32 val;
+       int rc;
+
+       WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+       val = RREG32(base_reg + 0x304);
+       val |= 0x1000;
+       WREG32(base_reg + 0x304, val);
+       val |= 0x40;
+       WREG32(base_reg + 0x304, val);
+
+       rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+                               params->enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+                               params->enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       WREG32(base_reg + 0x20, 0);
+
+       if (params->enable) {
+               input = params->input;
+
+               if (!input)
+                       return -EINVAL;
+
+               if (input->buffer_size == 0) {
+                       dev_err(hdev->dev,
+                               "ETR buffer size should be bigger than 0\n");
+                       return -EINVAL;
+               }
+
+               if (!goya_etr_validate_address(hdev,
+                               input->buffer_address, input->buffer_size)) {
+                       dev_err(hdev->dev, "buffer address is not valid\n");
+                       return -EINVAL;
+               }
+
+               WREG32(base_reg + 0x34, 0x3FFC);
+               WREG32(base_reg + 0x4, input->buffer_size);
+               WREG32(base_reg + 0x28, input->sink_mode);
+               WREG32(base_reg + 0x110, 0x700);
+               WREG32(base_reg + 0x118,
+                               lower_32_bits(input->buffer_address));
+               WREG32(base_reg + 0x11C,
+                               upper_32_bits(input->buffer_address));
+               WREG32(base_reg + 0x304, 3);
+               WREG32(base_reg + 0x308, 0xA);
+               WREG32(base_reg + 0x20, 1);
+       } else {
+               WREG32(base_reg + 0x34, 0);
+               WREG32(base_reg + 0x4, 0x400);
+               WREG32(base_reg + 0x118, 0);
+               WREG32(base_reg + 0x11C, 0);
+               WREG32(base_reg + 0x308, 0);
+               WREG32(base_reg + 0x28, 0);
+               WREG32(base_reg + 0x304, 0);
+
+               if (params->output_size >= sizeof(u32))
+                       *(u32 *) params->output = RREG32(base_reg + 0x18);
+       }
+
+       return 0;
+}
+
+static int goya_config_funnel(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
+                       CORESIGHT_UNLOCK);
+
+       WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
+                       params->enable ? 0x33F : 0);
+
+       return 0;
+}
+
+static int goya_config_bmon(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       struct hl_debug_params_bmon *input;
+       u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+       u32 pcie_base = 0;
+
+       WREG32(base_reg + 0x104, 1);
+
+       if (params->enable) {
+               input = params->input;
+
+               if (!input)
+                       return -EINVAL;
+
+               WREG32(base_reg + 0x208, lower_32_bits(input->addr_range0));
+               WREG32(base_reg + 0x20C, upper_32_bits(input->addr_range0));
+               WREG32(base_reg + 0x248, lower_32_bits(input->addr_range1));
+               WREG32(base_reg + 0x24C, upper_32_bits(input->addr_range1));
+               WREG32(base_reg + 0x224, 0);
+               WREG32(base_reg + 0x234, 0);
+               WREG32(base_reg + 0x30C, input->bw_win);
+               WREG32(base_reg + 0x308, input->win_capture);
+
+               /* PCIE IF BMON bug WA */
+               if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD &&
+                               params->reg_idx != GOYA_BMON_PCIE_MSTR_WR &&
+                               params->reg_idx != GOYA_BMON_PCIE_SLV_RD &&
+                               params->reg_idx != GOYA_BMON_PCIE_SLV_WR)
+                       pcie_base = 0xA000000;
+
+               WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12));
+               WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12));
+               WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12));
+
+               WREG32(base_reg + 0x100, 0x11);
+               WREG32(base_reg + 0x304, 0x1);
+       } else {
+               WREG32(base_reg + 0x208, 0xFFFFFFFF);
+               WREG32(base_reg + 0x20C, 0xFFFFFFFF);
+               WREG32(base_reg + 0x248, 0xFFFFFFFF);
+               WREG32(base_reg + 0x24C, 0xFFFFFFFF);
+               WREG32(base_reg + 0x224, 0xFFFFFFFF);
+               WREG32(base_reg + 0x234, 0x1070F);
+               WREG32(base_reg + 0x30C, 0);
+               WREG32(base_reg + 0x308, 0xFFFF);
+               WREG32(base_reg + 0x700, 0xA000B00);
+               WREG32(base_reg + 0x708, 0xA000A00);
+               WREG32(base_reg + 0x70C, 0xA000C00);
+               WREG32(base_reg + 0x100, 1);
+               WREG32(base_reg + 0x304, 0);
+               WREG32(base_reg + 0x104, 0);
+       }
+
+       return 0;
+}
+
+static int goya_config_spmu(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+       struct hl_debug_params_spmu *input = params->input;
+       u64 *output;
+       u32 output_arr_len;
+       u32 events_num;
+       u32 overflow_idx;
+       u32 cycle_cnt_idx;
+       int i;
+
+       if (params->enable) {
+               input = params->input;
+
+               if (!input)
+                       return -EINVAL;
+
+               if (input->event_types_num < 3) {
+                       dev_err(hdev->dev,
+                               "not enough values for SPMU enable\n");
+                       return -EINVAL;
+               }
+
+               WREG32(base_reg + 0xE04, 0x41013046);
+               WREG32(base_reg + 0xE04, 0x41013040);
+
+               for (i = 0 ; i < input->event_types_num ; i++)
+                       WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
+
+               WREG32(base_reg + 0xE04, 0x41013041);
+               WREG32(base_reg + 0xC00, 0x8000003F);
+       } else {
+               output = params->output;
+               output_arr_len = params->output_size / 8;
+               events_num = output_arr_len - 2;
+               overflow_idx = output_arr_len - 2;
+               cycle_cnt_idx = output_arr_len - 1;
+
+               if (!output)
+                       return -EINVAL;
+
+               if (output_arr_len < 3) {
+                       dev_err(hdev->dev,
+                               "not enough values for SPMU disable\n");
+                       return -EINVAL;
+               }
+
+               WREG32(base_reg + 0xE04, 0x41013040);
+
+               for (i = 0 ; i < events_num ; i++)
+                       output[i] = RREG32(base_reg + i * 8);
+
+               output[overflow_idx] = RREG32(base_reg + 0xCC0);
+
+               output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
+               output[cycle_cnt_idx] <<= 32;
+               output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
+
+               WREG32(base_reg + 0xCC0, 0);
+       }
+
+       return 0;
+}
+
+static int goya_config_timestamp(struct hl_device *hdev,
+               struct hl_debug_params *params)
+{
+       WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+       if (params->enable) {
+               WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+               WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+               WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+       }
+
+       return 0;
+}
+
+int goya_debug_coresight(struct hl_device *hdev, void *data)
+{
+       struct hl_debug_params *params = data;
+       u32 val;
+       int rc;
+
+       switch (params->op) {
+       case HL_DEBUG_OP_STM:
+               rc = goya_config_stm(hdev, params);
+               break;
+       case HL_DEBUG_OP_ETF:
+               rc = goya_config_etf(hdev, params);
+               break;
+       case HL_DEBUG_OP_ETR:
+               rc = goya_config_etr(hdev, params);
+               break;
+       case HL_DEBUG_OP_FUNNEL:
+               rc = goya_config_funnel(hdev, params);
+               break;
+       case HL_DEBUG_OP_BMON:
+               rc = goya_config_bmon(hdev, params);
+               break;
+       case HL_DEBUG_OP_SPMU:
+               rc = goya_config_spmu(hdev, params);
+               break;
+       case HL_DEBUG_OP_TIMESTAMP:
+               rc = goya_config_timestamp(hdev, params);
+               break;
+
+       default:
+               dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+               return -EINVAL;
+       }
+
+       /* Perform read from the device to flush all configuration */
+       val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+       return rc;
+}
index 57500323840132241d20f613b2e6f474a3634c82..d95d1b2f860d865ef4ede02938c3e3eb077e83f9 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include "goyaP.h"
+#include "include/goya/asic_reg/goya_regs.h"
 
 /*
  * goya_set_block_as_protected - set the given block as protected
@@ -2159,6 +2160,8 @@ static void goya_init_protection_bits(struct hl_device *hdev)
         * Bits 7-11 represents the word offset inside the 128 bytes.
         * Bits 2-6 represents the bit location inside the word.
         */
+       u32 pb_addr, mask;
+       u8 word_offset;
 
        goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
        goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
@@ -2237,6 +2240,14 @@ static void goya_init_protection_bits(struct hl_device *hdev)
        goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
        goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
        goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
+       goya_pb_set_block(hdev, mmTPC0_NRTR_BASE);
+       goya_pb_set_block(hdev, mmTPC_PLL_BASE);
+
+       pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS;
+       word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2;
+       mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2);
+
+       WREG32(pb_addr + word_offset, mask);
 
        goya_init_mme_protection_bits(hdev);
 
@@ -2294,8 +2305,8 @@ void goya_init_security(struct hl_device *hdev)
        u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
        u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
 
-       u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
-       u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+       u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+       u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
 
        u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
        u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
index a8ee52c880cd800651681b866048126b2e9fc478..2f02bb55f66aa2ef8896b7cac524af0e3e724286 100644 (file)
@@ -33,6 +33,9 @@
 
 #define HL_PLL_LOW_JOB_FREQ_USEC       5000000 /* 5 s */
 
+#define HL_ARMCP_INFO_TIMEOUT_USEC     10000000 /* 10s */
+#define HL_ARMCP_EEPROM_TIMEOUT_USEC   10000000 /* 10s */
+
 #define HL_MAX_QUEUES                  128
 
 #define HL_MAX_JOBS_PER_CS             64
@@ -48,8 +51,9 @@
 
 /**
  * struct pgt_info - MMU hop page info.
- * @node: hash linked-list node for the pgts hash of pgts.
- * @addr: physical address of the pgt.
+ * @node: hash linked-list node for the pgts shadow hash of pgts.
+ * @phys_addr: physical address of the pgt.
+ * @shadow_addr: shadow hop in the host.
  * @ctx: pointer to the owner ctx.
  * @num_of_ptes: indicates how many ptes are used in the pgt.
  *
  * page, it is freed with its pgt_info structure.
  */
 struct pgt_info {
-       struct hlist_node node;
-       u64 addr;
-       struct hl_ctx *ctx;
-       int num_of_ptes;
+       struct hlist_node       node;
+       u64                     phys_addr;
+       u64                     shadow_addr;
+       struct hl_ctx           *ctx;
+       int                     num_of_ptes;
 };
 
 struct hl_device;
@@ -145,6 +150,8 @@ enum hl_device_hw_state {
  *                             mapping DRAM memory.
  * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
  *                                      fault.
+ * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
+ * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
  * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
  * @mmu_dram_default_page_addr: DRAM default page physical address.
  * @mmu_pgt_size: MMU page tables total size.
@@ -186,6 +193,8 @@ struct asic_fixed_properties {
        u64                     va_space_dram_start_address;
        u64                     va_space_dram_end_address;
        u64                     dram_size_for_default_page_mapping;
+       u64                     pcie_dbi_base_address;
+       u64                     pcie_aux_dbi_reg_addr;
        u64                     mmu_pgt_addr;
        u64                     mmu_dram_default_page_addr;
        u32                     mmu_pgt_size;
@@ -381,14 +390,12 @@ struct hl_eq {
 
 /**
  * enum hl_asic_type - supported ASIC types.
- * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
- * @ASIC_GOYA: Goya device.
  * @ASIC_INVALID: Invalid ASIC type.
+ * @ASIC_GOYA: Goya device.
  */
 enum hl_asic_type {
-       ASIC_AUTO_DETECT,
-       ASIC_GOYA,
-       ASIC_INVALID
+       ASIC_INVALID,
+       ASIC_GOYA
 };
 
 struct hl_cs_parser;
@@ -472,8 +479,7 @@ enum hl_pll_frequency {
  * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
  *                              ASID-VA-size mask.
  * @send_heartbeat: send is-alive packet to ArmCP and verify response.
- * @enable_clock_gating: enable clock gating for reducing power consumption.
- * @disable_clock_gating: disable clock for accessing registers on HBW.
+ * @debug_coresight: perform certain actions on Coresight for debugging.
  * @is_device_idle: return true if device is idle, false otherwise.
  * @soft_reset_late_init: perform certain actions needed after soft reset.
  * @hw_queues_lock: acquire H/W queues lock.
@@ -482,6 +488,9 @@ enum hl_pll_frequency {
  * @get_eeprom_data: retrieve EEPROM data from F/W.
  * @send_cpu_message: send buffer to ArmCP.
  * @get_hw_state: retrieve the H/W state
+ * @pci_bars_map: Map PCI BARs.
+ * @set_dram_bar_base: Set DRAM BAR to map specific device address.
+ * @init_iatu: Initialize the iATU unit inside the PCI controller.
  */
 struct hl_asic_funcs {
        int (*early_init)(struct hl_device *hdev);
@@ -543,9 +552,8 @@ struct hl_asic_funcs {
        void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
                        u32 asid, u64 va, u64 size);
        int (*send_heartbeat)(struct hl_device *hdev);
-       void (*enable_clock_gating)(struct hl_device *hdev);
-       void (*disable_clock_gating)(struct hl_device *hdev);
-       bool (*is_device_idle)(struct hl_device *hdev);
+       int (*debug_coresight)(struct hl_device *hdev, void *data);
+       bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
        int (*soft_reset_late_init)(struct hl_device *hdev);
        void (*hw_queues_lock)(struct hl_device *hdev);
        void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -555,6 +563,9 @@ struct hl_asic_funcs {
        int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
                                u16 len, u32 timeout, long *result);
        enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+       int (*pci_bars_map)(struct hl_device *hdev);
+       int (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
+       int (*init_iatu)(struct hl_device *hdev);
 };
 
 
@@ -582,7 +593,8 @@ struct hl_va_range {
  * struct hl_ctx - user/kernel context.
  * @mem_hash: holds mapping from virtual address to virtual memory area
  *             descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
+ * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
+ * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
  * @hpriv: pointer to the private (KMD) data of the process (fd).
  * @hdev: pointer to the device structure.
  * @refcount: reference counter for the context. Context is released only when
@@ -611,7 +623,8 @@ struct hl_va_range {
  */
 struct hl_ctx {
        DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
-       DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
+       DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
+       DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
        struct hl_fpriv         *hpriv;
        struct hl_device        *hdev;
        struct kref             refcount;
@@ -850,6 +863,29 @@ struct hl_vm {
        u8                      init_done;
 };
 
+
+/*
+ * DEBUG, PROFILING STRUCTURE
+ */
+
+/**
+ * struct hl_debug_params - Coresight debug parameters.
+ * @input: pointer to component specific input parameters.
+ * @output: pointer to component specific output parameters.
+ * @output_size: size of output buffer.
+ * @reg_idx: relevant register ID.
+ * @op: component operation to execute.
+ * @enable: true if to enable component debugging, false otherwise.
+ */
+struct hl_debug_params {
+       void *input;
+       void *output;
+       u32 output_size;
+       u32 reg_idx;
+       u32 op;
+       bool enable;
+};
+
 /*
  * FILE PRIVATE STRUCTURE
  */
@@ -997,6 +1033,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
        WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
                        (val) << REG_FIELD_SHIFT(reg, field))
 
+#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
+               if (buf) \
+                       snprintf(buf, size, fmt, ##__VA_ARGS__); \
+               false; \
+       })
+
 struct hwmon_chip_info;
 
 /**
@@ -1047,7 +1089,8 @@ struct hl_device_reset_work {
  * @asic_specific: ASIC specific information to use only from ASIC files.
  * @mmu_pgt_pool: pool of available MMU hops.
  * @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
+ * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
  * @hwmon_dev: H/W monitor device.
  * @pm_mng_profile: current power management profile.
  * @hl_chip_info: ASIC's sensors information.
@@ -1082,6 +1125,7 @@ struct hl_device_reset_work {
  * @init_done: is the initialization of the device done.
  * @mmu_enable: is MMU enabled.
  * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
+ * @dma_mask: the dma mask that was set for this device
  */
 struct hl_device {
        struct pci_dev                  *pdev;
@@ -1117,6 +1161,7 @@ struct hl_device {
        struct gen_pool                 *mmu_pgt_pool;
        struct hl_vm                    vm;
        struct mutex                    mmu_cache_lock;
+       void                            *mmu_shadow_hop0;
        struct device                   *hwmon_dev;
        enum hl_pm_mng_profile          pm_mng_profile;
        struct hwmon_chip_info          *hl_chip_info;
@@ -1151,6 +1196,7 @@ struct hl_device {
        u8                              dram_default_page_mapping;
        u8                              init_done;
        u8                              device_cpu_disabled;
+       u8                              dma_mask;
 
        /* Parameters for bring-up */
        u8                              mmu_enable;
@@ -1245,6 +1291,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
 
 int hl_device_open(struct inode *inode, struct file *filp);
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+enum hl_device_status hl_device_status(struct hl_device *hdev);
 int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
                enum hl_asic_type asic_type, int minor);
 void destroy_hdev(struct hl_device *hdev);
@@ -1351,6 +1398,31 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
 void hl_mmu_swap_out(struct hl_ctx *ctx);
 void hl_mmu_swap_in(struct hl_ctx *ctx);
 
+int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+                               void __iomem *dst);
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
+                               u16 len, u32 timeout, long *result);
+int hl_fw_test_cpu_queue(struct hl_device *hdev);
+void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+                                               dma_addr_t *dma_handle);
+void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+                                       void *vaddr);
+int hl_fw_send_heartbeat(struct hl_device *hdev);
+int hl_fw_armcp_info_get(struct hl_device *hdev);
+int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+
+int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
+                       bool is_wc[3]);
+int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
+int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
+                               u64 addr);
+int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
+                       u64 dram_base_address, u64 host_phys_size);
+int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
+void hl_pci_fini(struct hl_device *hdev);
+int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
+
 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
 void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
 long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
index 748601463f111b5e7de0f7fcef0520fd829914e8..1667df7ca64c53515e448ab0d7e1d1acbe3ec8f6 100644 (file)
@@ -218,7 +218,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
        hdev->disabled = true;
        hdev->pdev = pdev; /* can be NULL in case of simulator device */
 
-       if (asic_type == ASIC_AUTO_DETECT) {
+       if (pdev) {
                hdev->asic_type = get_asic_type(pdev->device);
                if (hdev->asic_type == ASIC_INVALID) {
                        dev_err(&pdev->dev, "Unsupported ASIC\n");
@@ -229,6 +229,9 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
                hdev->asic_type = asic_type;
        }
 
+       /* Set default DMA mask to 32 bits */
+       hdev->dma_mask = 32;
+
        mutex_lock(&hl_devs_idr_lock);
 
        if (minor == -1) {
@@ -334,7 +337,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
                 " device found [%04x:%04x] (rev %x)\n",
                 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
 
-       rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
+       rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
        if (rc)
                return rc;
 
index 2c2739a3c5ec7ce3c3b415a25675b13310670bfa..eeefb22023e9a1cac01c46dc196cccf98574629a 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 
+static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
+       [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
+       [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
+       [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
+       [HL_DEBUG_OP_FUNNEL] = 0,
+       [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
+       [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
+       [HL_DEBUG_OP_TIMESTAMP] = 0
+
+};
+
+static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+       struct hl_info_device_status dev_stat = {0};
+       u32 size = args->return_size;
+       void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+       if ((!size) || (!out))
+               return -EINVAL;
+
+       dev_stat.status = hl_device_status(hdev);
+
+       return copy_to_user(out, &dev_stat,
+                       min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
+}
+
 static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
 {
        struct hl_info_hw_ip_info hw_ip = {0};
@@ -93,21 +119,91 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
+       hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, NULL, 0);
 
        return copy_to_user(out, &hw_idle,
                min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
 }
 
+static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
+{
+       struct hl_debug_params *params;
+       void *input = NULL, *output = NULL;
+       int rc;
+
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
+       if (!params)
+               return -ENOMEM;
+
+       params->reg_idx = args->reg_idx;
+       params->enable = args->enable;
+       params->op = args->op;
+
+       if (args->input_ptr && args->input_size) {
+               input = memdup_user((const void __user *) args->input_ptr,
+                                       args->input_size);
+               if (IS_ERR(input)) {
+                       rc = PTR_ERR(input);
+                       input = NULL;
+                       dev_err(hdev->dev,
+                               "error %d when copying input debug data\n", rc);
+                       goto out;
+               }
+
+               params->input = input;
+       }
+
+       if (args->output_ptr && args->output_size) {
+               output = kzalloc(args->output_size, GFP_KERNEL);
+               if (!output) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               params->output = output;
+               params->output_size = args->output_size;
+       }
+
+       rc = hdev->asic_funcs->debug_coresight(hdev, params);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "debug coresight operation failed %d\n", rc);
+               goto out;
+       }
+
+       if (output) {
+               if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+                                       output,
+                                       args->output_size)) {
+                       dev_err(hdev->dev,
+                               "copy to user failed in debug ioctl\n");
+                       rc = -EFAULT;
+                       goto out;
+               }
+       }
+
+out:
+       kfree(params);
+       kfree(output);
+       kfree(input);
+
+       return rc;
+}
+
 static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
 {
        struct hl_info_args *args = data;
        struct hl_device *hdev = hpriv->hdev;
        int rc;
 
+       /* We want to return device status even if it disabled or in reset */
+       if (args->op == HL_INFO_DEVICE_STATUS)
+               return device_status_info(hdev, args);
+
        if (hl_device_disabled_or_in_reset(hdev)) {
-               dev_err(hdev->dev,
-                       "Device is disabled or in reset. Can't execute INFO IOCTL\n");
+               dev_warn_ratelimited(hdev->dev,
+                       "Device is %s. Can't execute INFO IOCTL\n",
+                       atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
                return -EBUSY;
        }
 
@@ -137,6 +233,40 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
        return rc;
 }
 
+static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+       struct hl_debug_args *args = data;
+       struct hl_device *hdev = hpriv->hdev;
+       int rc = 0;
+
+       if (hl_device_disabled_or_in_reset(hdev)) {
+               dev_warn_ratelimited(hdev->dev,
+                       "Device is %s. Can't execute DEBUG IOCTL\n",
+                       atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+               return -EBUSY;
+       }
+
+       switch (args->op) {
+       case HL_DEBUG_OP_ETR:
+       case HL_DEBUG_OP_ETF:
+       case HL_DEBUG_OP_STM:
+       case HL_DEBUG_OP_FUNNEL:
+       case HL_DEBUG_OP_BMON:
+       case HL_DEBUG_OP_SPMU:
+       case HL_DEBUG_OP_TIMESTAMP:
+               args->input_size =
+                       min(args->input_size, hl_debug_struct_size[args->op]);
+               rc = debug_coresight(hdev, args);
+               break;
+       default:
+               dev_err(hdev->dev, "Invalid request %d\n", args->op);
+               rc = -ENOTTY;
+               break;
+       }
+
+       return rc;
+}
+
 #define HL_IOCTL_DEF(ioctl, _func) \
        [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
 
@@ -145,7 +275,8 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
        HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
-       HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
+       HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
+       HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
 };
 
 #define HL_CORE_IOCTL_COUNT    ARRAY_SIZE(hl_ioctls)
index 9dddb917e72cc39208f8b612dba41279aaa82d04..c8f28cadc3354bcb35bac9bb12824a036d50d2a3 100644 (file)
@@ -32,8 +32,6 @@ struct hl_eq_entry {
 #define EQ_CTL_EVENT_TYPE_SHIFT                16
 #define EQ_CTL_EVENT_TYPE_MASK         0x03FF0000
 
-#define EVENT_QUEUE_MSIX_IDX           5
-
 enum pq_init_status {
        PQ_INIT_STATUS_NA = 0,
        PQ_INIT_STATUS_READY_FOR_CP,
@@ -302,6 +300,14 @@ enum armcp_pwm_attributes {
        armcp_pwm_enable
 };
 
+#define HL_CPU_PKT_SHIFT               5
+#define HL_CPU_PKT_SIZE                        (1 << HL_CPU_PKT_SHIFT)
+#define HL_CPU_PKT_MASK                        (~((1 << HL_CPU_PKT_SHIFT) - 1))
+#define HL_CPU_MAX_PKTS_IN_CB          32
+#define HL_CPU_CB_SIZE                 (HL_CPU_PKT_SIZE * \
+                                        HL_CPU_MAX_PKTS_IN_CB)
+#define HL_CPU_ACCESSIBLE_MEM_SIZE     (HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
+
 /* Event Queue Packets */
 
 struct eq_generic_event {
index 2cf5c46b6e8ec7d90b22e34becf586e13b4bc8d3..4e0dbbbbde209b3b8c2ba5f762843129fa57508d 100644 (file)
 #define CPU_CA53_CFG_ARM_PMU_EVENT_MASK                              0x3FFFFFFF
 
 #endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
-
index 840ccffa1081a7858befb1de55ed9021f8eed6c5..f3faf1aad91a7978b30d518e72cb0c039b164c62 100644 (file)
@@ -58,4 +58,3 @@
 #define mmCPU_CA53_CFG_ARM_PMU_1                                     0x441214
 
 #endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
-
index f23cb3e41c30885317dc883741aa3c3004fb33e0..cf657918962a246ade31e6f67f1848084c25c845 100644 (file)
@@ -46,4 +46,3 @@
 #define mmCPU_IF_AXI_SPLIT_INTR                                      0x442130
 
 #endif /* ASIC_REG_CPU_IF_REGS_H_ */
-
index 8fc97f838ada8b952a4dfccd61d1e5856a82346a..8c8f9726d4b904c31aa3000bd70224764539ff44 100644 (file)
 #define mmCPU_PLL_FREQ_CALC_EN                                       0x4A2440
 
 #endif /* ASIC_REG_CPU_PLL_REGS_H_ */
-
index 61c8cd9ce58b50a181d1ee152d792dc46e3bed98..0b246fe6ad042ae09c51dc47834f9429d821ca16 100644 (file)
 #define mmDMA_CH_0_MEM_INIT_BUSY                                     0x4011FC
 
 #endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
-
index 92960ef5e308aafa96e4867f8cd3fd63b783a0d7..5449031722f21d2bd38543c9b9fd68eab3f99476 100644 (file)
 #define mmDMA_CH_1_MEM_INIT_BUSY                                     0x4091FC
 
 #endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
-
index 4e37871a51bb2e24cd6f6d4bcfa4b7a23993b8ff..a4768521d18a8dcdfb650d6f2fb447b2b5eee288 100644 (file)
 #define mmDMA_CH_2_MEM_INIT_BUSY                                     0x4111FC
 
 #endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
-
index a2d6aeb32a1847f7777ce8da0f25ac7e3275a18f..619d01897ff8d71eb284f6092550c841a317e7bd 100644 (file)
 #define mmDMA_CH_3_MEM_INIT_BUSY                                     0x4191FC
 
 #endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
-
index 400d6fd3acf5affb595a324fe1be0895be942564..038617e163f1942696740654a0ff76dff85b9ce7 100644 (file)
 #define mmDMA_CH_4_MEM_INIT_BUSY                                     0x4211FC
 
 #endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
-
index 8d965443c51efd2bb87c49db32612eb51bca0c58..f43b564af1beef205186c10e471a7176daba5bcf 100644 (file)
 #define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK                             0x1FFFFFFF
 
 #endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
-
index 8bfcb001189d7b723333f0e65f0d6552ce15fddd..c3bfc1b8e3fde817455a4bfff3f769df18e3cbfc 100644 (file)
 #define mmDMA_MACRO_RAZWI_HBW_RD_ID                                  0x4B0158
 
 #endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
-
index 9f33f351a3c1fe6780104e239918b326f096645a..bc977488c0720bab38c413d0ed112fd244772651 100644 (file)
 #define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK                              0x1
 
 #endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
-
index d8293745a02b722bb9d2bb05fa003539a2e782a6..c4abc7ff1fc6f5e41d03713354c81a17d2ad1671 100644 (file)
 #define mmDMA_NRTR_NON_LIN_SCRAMB                                    0x1C0604
 
 #endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
-
index 10619dbb9b1729951fb4acda81f4c1f24fc6bf50..b17f72c31ab60d672788442a740da30bfed1aa0b 100644 (file)
 #define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK                               0xFFFFFFFF
 
 #endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
-
index c693bc5dcb22d0351df3115a67ea5ddb2961b66b..bf360b301154b755993bb0894a060e8e853f75d2 100644 (file)
 #define mmDMA_QM_0_CQ_BUF_RDATA                                      0x40030C
 
 #endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
-
index da928390f89c97e41049dbc30b40545a257c90a7..51d432d05ac433a8f537e9b89ce6c04c10a3f68a 100644 (file)
 #define mmDMA_QM_1_CQ_BUF_RDATA                                      0x40830C
 
 #endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
-
index b4f06e9b71d6189ff2783ff98ec83807532195cf..18fc0c2b6cc27aceaa08476f445e752c9c95fd6c 100644 (file)
 #define mmDMA_QM_2_CQ_BUF_RDATA                                      0x41030C
 
 #endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
-
index 53e3cd78a06bc785c417bbe934eba9d4bd6e45d3..6cf7204bf5cc692907bf87c59cb453d4be15a8b6 100644 (file)
 #define mmDMA_QM_3_CQ_BUF_RDATA                                      0x41830C
 
 #endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
-
index e0eb5f2602011efc4326ab6e894bc5d0214590c2..36fef2682875bb64a2c47ce84bdb87bac6b4a5a2 100644 (file)
 #define mmDMA_QM_4_CQ_BUF_RDATA                                      0x42030C
 
 #endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
-
index a161ecfe74de031705756f62b2efc1c98d1cfb64..8618891d5afabde368810e30f99f4c103f51ffbb 100644 (file)
                        1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
                        1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
 
-/* PCI CONFIGURATION SPACE */
-#define mmPCI_CONFIG_ELBI_ADDR         0xFF0
-#define mmPCI_CONFIG_ELBI_DATA         0xFF4
-#define mmPCI_CONFIG_ELBI_CTRL         0xFF8
-#define PCI_CONFIG_ELBI_CTRL_WRITE     (1 << 31)
-
-#define mmPCI_CONFIG_ELBI_STS          0xFFC
-#define PCI_CONFIG_ELBI_STS_ERR                (1 << 30)
-#define PCI_CONFIG_ELBI_STS_DONE       (1 << 31)
-#define PCI_CONFIG_ELBI_STS_MASK       (PCI_CONFIG_ELBI_STS_ERR | \
-                                       PCI_CONFIG_ELBI_STS_DONE)
-
 #define GOYA_IRQ_HBW_ID_MASK                   0x1FFF
 #define GOYA_IRQ_HBW_ID_SHIFT                  0
 #define GOYA_IRQ_HBW_INTERNAL_ID_MASK          0xE000
index 6cb0b6e54d417cf9265d1c99c444d7b2df433acd..506e71e201e10dc417c1eeb32d3ac3a41e901dfa 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0
  *
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2019 HabanaLabs, Ltd.
  * All Rights Reserved.
  *
  */
@@ -12,6 +12,7 @@
 #include "stlb_regs.h"
 #include "mmu_regs.h"
 #include "pcie_aux_regs.h"
+#include "pcie_wrap_regs.h"
 #include "psoc_global_conf_regs.h"
 #include "psoc_spi_regs.h"
 #include "psoc_mme_pll_regs.h"
index 0a743817aad7d1f5495d4afe98052712fc09ea3d..4ae7fed8b18cfdfea00bf3c290e930dd83ad2af1 100644 (file)
 #define mmIC_PLL_FREQ_CALC_EN                                        0x4A3440
 
 #endif /* ASIC_REG_IC_PLL_REGS_H_ */
-
index 4408188aa06793083f1f31fa5824082a55697240..6d35d852798b449bdfcc01dc77b820a3b30855bf 100644 (file)
 #define mmMC_PLL_FREQ_CALC_EN                                        0x4A1440
 
 #endif /* ASIC_REG_MC_PLL_REGS_H_ */
-
index 687bca5c5fe34527b4a139392bd211b0cca10b97..6c23f8b96e7ef371bfa6b142efdc81e1d481896d 100644 (file)
 #define MME1_RTR_NON_LIN_SCRAMB_EN_MASK                              0x1
 
 #endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
-
index c248339a1cbebf6af58e6a43058ee4a8dcef6a69..122e9d529939e6d68b18c6067e8769248ccb90f6 100644 (file)
 #define mmMME1_RTR_NON_LIN_SCRAMB                                    0x40604
 
 #endif /* ASIC_REG_MME1_RTR_REGS_H_ */
-
index 7a2b777bdc4ff92ad87e5e733c443cc8f44668b1..00ce2252bbfbe82efe10d3a1788ece96df5302c7 100644 (file)
 #define mmMME2_RTR_NON_LIN_SCRAMB                                    0x80604
 
 #endif /* ASIC_REG_MME2_RTR_REGS_H_ */
-
index b78f8bc387fc750c58922d5039e341c482868cff..8e3eb7fd207026b73830159233a67e8e5e115484 100644 (file)
 #define mmMME3_RTR_NON_LIN_SCRAMB                                    0xC0604
 
 #endif /* ASIC_REG_MME3_RTR_REGS_H_ */
-
index d9a4a02cefa3b34c24515411ce18bed9a78c8c2f..79b67bbc8567ee8307453edb70cbb94e8089e70b 100644 (file)
 #define mmMME4_RTR_NON_LIN_SCRAMB                                    0x100604
 
 #endif /* ASIC_REG_MME4_RTR_REGS_H_ */
-
index 205adc988407fbd9f774e8a6123f4cb4e3f4382c..0ac3c37ce47ffc5a55db6ac470814c02f198c3ee 100644 (file)
 #define mmMME5_RTR_NON_LIN_SCRAMB                                    0x140604
 
 #endif /* ASIC_REG_MME5_RTR_REGS_H_ */
-
index fcec68388278acd0326da5232d223ed1e3cc6efb..50c49cce72a64884eb63a5d621aacb4e022e2fb6 100644 (file)
 #define mmMME6_RTR_NON_LIN_SCRAMB                                    0x180604
 
 #endif /* ASIC_REG_MME6_RTR_REGS_H_ */
-
index a0d4382fbbd075c41e34a99ffcd38f50b0cc2d8c..fe7d95bdcef9c0270ff4af69e5b1074ad52fbcde 100644 (file)
 #define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK                               0xFFFFFFFF
 
 #endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
-
index 5c2f6b870a58b0a4078d7451ac0d40ff0ae8ab00..5f8b85d2b4b1b0eecd086afea9d9a8d28a9fd270 100644 (file)
 #define mmMME_CMDQ_CQ_BUF_RDATA                                      0xD930C
 
 #endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
-
index c7b1b0bb33841301f003f0c5b93a78253d32349d..1882c413cbe0432bf6f2ad3042d1a38fac8c91ec 100644 (file)
 #define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK                     0xFF000000
 
 #endif /* ASIC_REG_MME_MASKS_H_ */
-
index d4bfa58dce1997b6691ca168ba5eef4a0377dab8..e464e381555c40de405e63364d0dbca71c82c95c 100644 (file)
 #define MME_QM_CQ_BUF_RDATA_VAL_MASK                                 0xFFFFFFFF
 
 #endif /* ASIC_REG_MME_QM_MASKS_H_ */
-
index b5b1c776f6c3b440bce874656db8938d068f7a4c..538708beffc9c593c3f34cef9f8e540732f7807b 100644 (file)
 #define mmMME_QM_CQ_BUF_RDATA                                        0xD830C
 
 #endif /* ASIC_REG_MME_QM_REGS_H_ */
-
index 9436b1e2705a25140a593d7146039c428bd4406d..0396cbfd5c8903981325b910663047c8ef0b7187 100644 (file)
 #define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT                           0xD0BAC
 
 #endif /* ASIC_REG_MME_REGS_H_ */
-
index 3a78078d3c4c65b6ff33eb9ff14ffb472f83505a..c3e69062b135aa66193efeabdfb1e5bfd3dc0bef 100644 (file)
 #define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK                     0xFFFFFFFF
 
 #endif /* ASIC_REG_MMU_MASKS_H_ */
-
index bec6c014135cc4a4711a134c12077ae318d8348f..7ec81f12031e0917a25e3274db529ad0f2540006 100644 (file)
@@ -50,4 +50,3 @@
 #define mmMMU_ACCESS_ERROR_CAPTURE_VA                                0x480040
 
 #endif /* ASIC_REG_MMU_REGS_H_ */
-
index 209e41402a11188c088c7462d9b5b020aec8223a..ceb59f2e28b3f1560ce3b748fa46e67c9e1fea1e 100644 (file)
 #define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK                              0x1
 
 #endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
-
index 447e5d4e7dc81580c71c0fbc3539c31b7d0d7e9a..dd067f301ac2ca4e5e489ad1b4cdcdd577dcff5c 100644 (file)
 #define mmPCI_NRTR_NON_LIN_SCRAMB                                    0x604
 
 #endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
-
index daaf5d9079dc3036295fd4d1b399bf2ac64b5f2a..35b1d8ac6f63f922bcaaf89dd043ceae87704a20 100644 (file)
 #define mmPCIE_AUX_PERST                                             0xC079B8
 
 #endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
new file mode 100644 (file)
index 0000000..d1e55aa
--- /dev/null
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
+#define ASIC_REG_PCIE_WRAP_REGS_H_
+
+/*
+ *****************************************
+ *   PCIE_WRAP (Prototype: PCIE_WRAP)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_PHY_RST_N                                        0xC01300
+
+#define mmPCIE_WRAP_OUTSTAND_TRANS                                   0xC01400
+
+#define mmPCIE_WRAP_MASK_REQ                                         0xC01404
+
+#define mmPCIE_WRAP_IND_AWADDR_L                                     0xC01500
+
+#define mmPCIE_WRAP_IND_AWADDR_H                                     0xC01504
+
+#define mmPCIE_WRAP_IND_AWLEN                                        0xC01508
+
+#define mmPCIE_WRAP_IND_AWSIZE                                       0xC0150C
+
+#define mmPCIE_WRAP_IND_AWBURST                                      0xC01510
+
+#define mmPCIE_WRAP_IND_AWLOCK                                       0xC01514
+
+#define mmPCIE_WRAP_IND_AWCACHE                                      0xC01518
+
+#define mmPCIE_WRAP_IND_AWPROT                                       0xC0151C
+
+#define mmPCIE_WRAP_IND_AWVALID                                      0xC01520
+
+#define mmPCIE_WRAP_IND_WDATA_0                                      0xC01524
+
+#define mmPCIE_WRAP_IND_WDATA_1                                      0xC01528
+
+#define mmPCIE_WRAP_IND_WDATA_2                                      0xC0152C
+
+#define mmPCIE_WRAP_IND_WDATA_3                                      0xC01530
+
+#define mmPCIE_WRAP_IND_WSTRB                                        0xC01544
+
+#define mmPCIE_WRAP_IND_WLAST                                        0xC01548
+
+#define mmPCIE_WRAP_IND_WVALID                                       0xC0154C
+
+#define mmPCIE_WRAP_IND_BRESP                                        0xC01550
+
+#define mmPCIE_WRAP_IND_BVALID                                       0xC01554
+
+#define mmPCIE_WRAP_IND_ARADDR_0                                     0xC01558
+
+#define mmPCIE_WRAP_IND_ARADDR_1                                     0xC0155C
+
+#define mmPCIE_WRAP_IND_ARLEN                                        0xC01560
+
+#define mmPCIE_WRAP_IND_ARSIZE                                       0xC01564
+
+#define mmPCIE_WRAP_IND_ARBURST                                      0xC01568
+
+#define mmPCIE_WRAP_IND_ARLOCK                                       0xC0156C
+
+#define mmPCIE_WRAP_IND_ARCACHE                                      0xC01570
+
+#define mmPCIE_WRAP_IND_ARPROT                                       0xC01574
+
+#define mmPCIE_WRAP_IND_ARVALID                                      0xC01578
+
+#define mmPCIE_WRAP_IND_RDATA_0                                      0xC0157C
+
+#define mmPCIE_WRAP_IND_RDATA_1                                      0xC01580
+
+#define mmPCIE_WRAP_IND_RDATA_2                                      0xC01584
+
+#define mmPCIE_WRAP_IND_RDATA_3                                      0xC01588
+
+#define mmPCIE_WRAP_IND_RLAST                                        0xC0159C
+
+#define mmPCIE_WRAP_IND_RRESP                                        0xC015A0
+
+#define mmPCIE_WRAP_IND_RVALID                                       0xC015A4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO                                  0xC015A8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_0                       0xC015AC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_1                       0xC015B0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_P_TAG                            0xC015B4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_ATU_BYPAS                        0xC015B8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_FUNC_NUM                         0xC015BC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_ACT                        0xC015C0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_NUM                        0xC015C4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_TLPPRFX                          0xC015C8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO                                  0xC015CC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_TLPPRFX                          0xC015D0
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_ATU_BYP                          0xC015D4
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_FUNC_NUM                         0xC015D8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_ACT                        0xC015DC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_NUM                        0xC015E0
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO                                  0xC01800
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0                       0xC01804
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1                       0xC01808
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG                            0xC0180C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS                        0xC01810
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM                         0xC01814
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT                        0xC01818
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM                        0xC0181C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX                          0xC01820
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO                                  0xC01824
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX                          0xC01828
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP                          0xC0182C
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM                         0xC01830
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT                        0xC01834
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM                        0xC01838
+
+#define mmPCIE_WRAP_MAX_QID                                          0xC01900
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_0                                 0xC01910
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_1                                 0xC01914
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_2                                 0xC01918
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_3                                 0xC0191C
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_0                                 0xC01920
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_1                                 0xC01924
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_2                                 0xC01928
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_3                                 0xC0192C
+
+#define mmPCIE_WRAP_DB_MASK                                          0xC01940
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_H                                   0xC01A00
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_L                                   0xC01A04
+
+#define mmPCIE_WRAP_SQ_STRIDE_ACCRESS                                0xC01A08
+
+#define mmPCIE_WRAP_SQ_POP_CMD                                       0xC01A10
+
+#define mmPCIE_WRAP_SQ_POP_DATA                                      0xC01A14
+
+#define mmPCIE_WRAP_DB_INTR_0                                        0xC01A20
+
+#define mmPCIE_WRAP_DB_INTR_1                                        0xC01A24
+
+#define mmPCIE_WRAP_DB_INTR_2                                        0xC01A28
+
+#define mmPCIE_WRAP_DB_INTR_3                                        0xC01A2C
+
+#define mmPCIE_WRAP_DB_INTR_4                                        0xC01A30
+
+#define mmPCIE_WRAP_DB_INTR_5                                        0xC01A34
+
+#define mmPCIE_WRAP_DB_INTR_6                                        0xC01A38
+
+#define mmPCIE_WRAP_DB_INTR_7                                        0xC01A3C
+
+#define mmPCIE_WRAP_MMU_BYPASS_DMA                                   0xC01A80
+
+#define mmPCIE_WRAP_MMU_BYPASS_NON_DMA                               0xC01A84
+
+#define mmPCIE_WRAP_ASID_NON_DMA                                     0xC01A90
+
+#define mmPCIE_WRAP_ASID_DMA_0                                       0xC01AA0
+
+#define mmPCIE_WRAP_ASID_DMA_1                                       0xC01AA4
+
+#define mmPCIE_WRAP_ASID_DMA_2                                       0xC01AA8
+
+#define mmPCIE_WRAP_ASID_DMA_3                                       0xC01AAC
+
+#define mmPCIE_WRAP_ASID_DMA_4                                       0xC01AB0
+
+#define mmPCIE_WRAP_ASID_DMA_5                                       0xC01AB4
+
+#define mmPCIE_WRAP_ASID_DMA_6                                       0xC01AB8
+
+#define mmPCIE_WRAP_ASID_DMA_7                                       0xC01ABC
+
+#define mmPCIE_WRAP_CPU_HOT_RST                                      0xC01AE0
+
+#define mmPCIE_WRAP_AXI_PROT_OVR                                     0xC01AE4
+
+#define mmPCIE_WRAP_CACHE_OVR                                        0xC01B00
+
+#define mmPCIE_WRAP_LOCK_OVR                                         0xC01B04
+
+#define mmPCIE_WRAP_PROT_OVR                                         0xC01B08
+
+#define mmPCIE_WRAP_ARUSER_OVR                                       0xC01B0C
+
+#define mmPCIE_WRAP_AWUSER_OVR                                       0xC01B10
+
+#define mmPCIE_WRAP_ARUSER_OVR_EN                                    0xC01B14
+
+#define mmPCIE_WRAP_AWUSER_OVR_EN                                    0xC01B18
+
+#define mmPCIE_WRAP_MAX_OUTSTAND                                     0xC01B20
+
+#define mmPCIE_WRAP_MST_IN                                           0xC01B24
+
+#define mmPCIE_WRAP_RSP_OK                                           0xC01B28
+
+#define mmPCIE_WRAP_LBW_CACHE_OVR                                    0xC01B40
+
+#define mmPCIE_WRAP_LBW_LOCK_OVR                                     0xC01B44
+
+#define mmPCIE_WRAP_LBW_PROT_OVR                                     0xC01B48
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR                                   0xC01B4C
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR                                   0xC01B50
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR_EN                                0xC01B58
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR_EN                                0xC01B5C
+
+#define mmPCIE_WRAP_LBW_MAX_OUTSTAND                                 0xC01B60
+
+#define mmPCIE_WRAP_LBW_MST_IN                                       0xC01B64
+
+#define mmPCIE_WRAP_LBW_RSP_OK                                       0xC01B68
+
+#define mmPCIE_WRAP_QUEUE_INIT                                       0xC01C00
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_0                                 0xC01C10
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_1                                 0xC01C14
+
+#define mmPCIE_WRAP_DB_AWUSER                                        0xC01D00
+
+#define mmPCIE_WRAP_DB_ARUSER                                        0xC01D04
+
+#define mmPCIE_WRAP_PCIE_AWUSER                                      0xC01D08
+
+#define mmPCIE_WRAP_PCIE_ARUSER                                      0xC01D0C
+
+#define mmPCIE_WRAP_PSOC_AWUSER                                      0xC01D10
+
+#define mmPCIE_WRAP_PSOC_ARUSER                                      0xC01D14
+
+#define mmPCIE_WRAP_SCH_Q_AWUSER                                     0xC01D18
+
+#define mmPCIE_WRAP_SCH_Q_ARUSER                                     0xC01D1C
+
+#define mmPCIE_WRAP_PSOC2PCI_AWUSER                                  0xC01D40
+
+#define mmPCIE_WRAP_PSOC2PCI_ARUSER                                  0xC01D44
+
+#define mmPCIE_WRAP_DRAIN_TIMEOUT                                    0xC01D50
+
+#define mmPCIE_WRAP_DRAIN_CFG                                        0xC01D54
+
+#define mmPCIE_WRAP_DB_AXI_ERR                                       0xC01DE0
+
+#define mmPCIE_WRAP_SPMU_INTR                                        0xC01DE4
+
+#define mmPCIE_WRAP_AXI_INTR                                         0xC01DE8
+
+#define mmPCIE_WRAP_E2E_CTRL                                         0xC01DF0
+
+#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
index 8eda4de58788491fcb4bd8ac88dfc227655f8874..9271ea95ebe9bfa332b97affb132c9bdca1c214a 100644 (file)
 #define mmPSOC_EMMC_PLL_FREQ_CALC_EN                                 0xC70440
 
 #endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
-
index d4bf0e1db4df87dea684fce99648794166467b30..324266653c9aff3a1b45e5db0e3992711f2ed786 100644 (file)
 #define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK                            0x3
 
 #endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
-
index cfbdd2c9c5c753a4fb9093e46996b036ffc9ff88..8141f422e712bcb3bc308c9b8a736ea260444d82 100644 (file)
 #define mmPSOC_GLOBAL_CONF_PAD_SEL_81                                0xC4BA44
 
 #endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
-
index 6723d8f76f307242c6a32d725bccda952b0c8cc1..4789ebb9c3372ca15a2d430a09226a5154f19004 100644 (file)
 #define mmPSOC_MME_PLL_FREQ_CALC_EN                                  0xC71440
 
 #endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
-
index abcded0531c9db56490e81d8388d4454f1b41399..27a296ea6c3dac2b3a147daeb8d91acd0893799c 100644 (file)
 #define mmPSOC_PCI_PLL_FREQ_CALC_EN                                  0xC72440
 
 #endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
-
index 5925c7477c25f0d493ca039f5450e6e931cf44a1..66aee7fa6b1e1e84aea2b54b4ce606710d16a0c6 100644 (file)
 #define mmPSOC_SPI_RSVD_2                                            0xC430FC
 
 #endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
-
index d56c9fa0e7badd8da930a457cb4d70d3eda17b41..2ea1770b078f229fe7997045ff096260a1b56184 100644 (file)
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX                               0x201330
 
 #endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
-
index 5624544303ca9126708e713d4e11a75897ecb834..37e0713efa73f3f55211a25fa75307986922b99c 100644 (file)
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX                               0x205330
 
 #endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
-
index 3322bc0bd1df1471d9b42eef3c94284aa25f8dfc..d2572279a2b920e0f42a9cd7681d5a4e2a7ced1a 100644 (file)
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX                               0x209330
 
 #endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
-
index 81e393db202720cd61db1c4c047c99d251d2bcfa..68c5b402c5067cdab4b3f444a05ade4d8cd50789 100644 (file)
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX                               0x20D330
 
 #endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
-
index b2e11b1de385b495b40d7088258970a2a8841558..a42f1ba06d28fcc27239b5b497d81578477a4cae 100644 (file)
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX                               0x211330
 
 #endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
-
index b4ea8cae2757b45e7c49396143b42df88bc4d218..94f2ed4a36bd27265b199e9bbe1108426bb084ad 100644 (file)
 #define STLB_SRAM_INIT_BUSY_DATA_MASK                                0x10
 
 #endif /* ASIC_REG_STLB_MASKS_H_ */
-
index 0f5281d3e65b62c0a1dbca1662f9d54c18554315..35013f65acd2d3a20cc795100d03917ad79c887f 100644 (file)
@@ -52,4 +52,3 @@
 #define mmSTLB_SRAM_INIT                                             0x49004C
 
 #endif /* ASIC_REG_STLB_REGS_H_ */
-
index e5587b49eecd4d6a6ed97dc22396e9c929897d21..89c9507a512fa32ad9a65c6490a46ac2546af61b 100644 (file)
 #define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK             0x70000000
 
 #endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
-
index 2be28a63c50ab5af65596cd171bd90d11ab5ac1d..7d71c4b73a5e9be0383613a9393b85e2a3658f69 100644 (file)
 #define mmTPC0_CFG_FUNC_MBIST_MEM_9                                  0xE06E2C
 
 #endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
-
index 9aa2d8b53207fe0fcbacc7a2a6b00c91b58369d4..9395f2458771a5d46fdb5ee7faae4b6ae48caf44 100644 (file)
 #define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK                              0xFFFFFFFF
 
 #endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
-
index 3572752ba66ec42627d432d7945c87f700680301..bc51df573bf09a1c95c269f43c4c6c74b674c7ec 100644 (file)
 #define mmTPC0_CMDQ_CQ_BUF_RDATA                                     0xE0930C
 
 #endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
-
index ed866d93c44036d944d809be3d9944c2d9149795..553c6b6bd5ec6d4cc8238d33d38359fbe465f751 100644 (file)
 #define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK                 0x1
 
 #endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
-
index f1a1b4fa4841e956739d84f369eb4fcdaef78361..8495479c36595e72a960f7ffe509458a675a543c 100644 (file)
 #define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL                           0x3040334
 
 #endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
-
index 7f86621179a55519ed862a7393543e25de4487bb..43fafcf010410846f246aa676235f3ecc5947467 100644 (file)
 #define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK                             0x1
 
 #endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
-
index dc280f4e66081902c630261f722c27944e46687a..ce3346dd2042e743517438077f21ed2fdfbd075d 100644 (file)
 #define mmTPC0_NRTR_NON_LIN_SCRAMB                                   0xE00604
 
 #endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
-
index 80d97ee3d8d6c30526460c55f2d44b1de1626a54..2e4b45947944fc6d2affba2bc5423798845066d6 100644 (file)
 #define TPC0_QM_CQ_BUF_RDATA_VAL_MASK                                0xFFFFFFFF
 
 #endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
-
index 7552d4ba61febd64814735da68ea7857a203bed2..4fa09eb88878120692c380a3ac9f22df22ecfd19 100644 (file)
 #define mmTPC0_QM_CQ_BUF_RDATA                                       0xE0830C
 
 #endif /* ASIC_REG_TPC0_QM_REGS_H_ */
-
index 19894413474aceafaa8f3fadd6c0b463f953c29f..928eef1808aefe2d0a43a8ff04805e03e7ba9d97 100644 (file)
 #define mmTPC1_CFG_FUNC_MBIST_MEM_9                                  0xE46E2C
 
 #endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
-
index 9099ebd7ab238fb258f76889b45081605fa8381d..30ae0f30732865be52cffca4b48749dd1a4e7446 100644 (file)
 #define mmTPC1_CMDQ_CQ_BUF_RDATA                                     0xE4930C
 
 #endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
-
index bc8b9a10391f58483178a19d8ea9f808691d418b..b95de4f95ba9dcd0252ffca7bc03aad45d2e6ddf 100644 (file)
 #define mmTPC1_QM_CQ_BUF_RDATA                                       0xE4830C
 
 #endif /* ASIC_REG_TPC1_QM_REGS_H_ */
-
index ae267f8f457e75a4b156bf7ba238f406b1dc8456..0f91e307879e964803899d801c1ee2dd7a20da96 100644 (file)
 #define mmTPC1_RTR_NON_LIN_SCRAMB                                    0xE40604
 
 #endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
-
index 9c33fc039036fc40a345de082bff238d01742767..73421227f35bb5e55744db75a5f8e3fb5ada6dfa 100644 (file)
 #define mmTPC2_CFG_FUNC_MBIST_MEM_9                                  0xE86E2C
 
 #endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
-
index 7a643887d6e1c9accaee53c41bc5f87da1e3e04f..27b66bf2da9f45e2fc17a4e80e8a0f4d4d645e8e 100644 (file)
 #define mmTPC2_CMDQ_CQ_BUF_RDATA                                     0xE8930C
 
 #endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
-
index f3e32c018064cd2604ce729a5e668dab95ccb2d5..31e5b2f5390514aa059235c41db65287f898d3bc 100644 (file)
 #define mmTPC2_QM_CQ_BUF_RDATA                                       0xE8830C
 
 #endif /* ASIC_REG_TPC2_QM_REGS_H_ */
-
index 0eb0cd1fbd19bc5b0eaf4b2f4c001d63163b87cf..4eddeaa15d948ecfd74226c62e9bd325430a561e 100644 (file)
 #define mmTPC2_RTR_NON_LIN_SCRAMB                                    0xE80604
 
 #endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
-
index 0baf63c69b255c0aeeb9292c947720ce541e37af..ce573a1a8361b7a2f0d2bb1b39ef2552def536bd 100644 (file)
 #define mmTPC3_CFG_FUNC_MBIST_MEM_9                                  0xEC6E2C
 
 #endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
-
index 82a5261e852f5932e084c9253280fee795b8b3cf..11d81fca0a0fee009a4523bcea8af47078819b76 100644 (file)
 #define mmTPC3_CMDQ_CQ_BUF_RDATA                                     0xEC930C
 
 #endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
-
index b05b1e18e664730ee9381a6c88a2ce56287786c8..e41595a19e69114860d84aa0f260598234128ed0 100644 (file)
 #define mmTPC3_QM_CQ_BUF_RDATA                                       0xEC830C
 
 #endif /* ASIC_REG_TPC3_QM_REGS_H_ */
-
index 5a2fd76526508d617b5d4ae69f7bac8e65ab199a..34a438b1efe5d7598273f79f9ae4db3c317266b4 100644 (file)
 #define mmTPC3_RTR_NON_LIN_SCRAMB                                    0xEC0604
 
 #endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
-
index d64a100075f290969bbba78597d447cd91e187b4..d44caf0fc1bb00d31a3165bf47ae5d8e3e0edd96 100644 (file)
 #define mmTPC4_CFG_FUNC_MBIST_MEM_9                                  0xF06E2C
 
 #endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
-
index 565b42885b0d1e9683634374a3b64af14cab4c9c..f13a6532961ff2a0174dee4cd924755d15d14d2a 100644 (file)
 #define mmTPC4_CMDQ_CQ_BUF_RDATA                                     0xF0930C
 
 #endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
-
index 196da3f1271026b58f7e99851b9256424ecc9b9c..db081fc17cfc095bdfc5355aa8db135a057aed65 100644 (file)
 #define mmTPC4_QM_CQ_BUF_RDATA                                       0xF0830C
 
 #endif /* ASIC_REG_TPC4_QM_REGS_H_ */
-
index 8b54041d144a065f6f44622309f2c662caa2c0d7..8c5372303b28ad1fc19d5e27fbc90c44c4ee8607 100644 (file)
 #define mmTPC4_RTR_NON_LIN_SCRAMB                                    0xF00604
 
 #endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
-
index 3f00954fcdba8897926acaacac94db88c2316909..5139fde710117f4503e33a25bda0b1740994732b 100644 (file)
 #define mmTPC5_CFG_FUNC_MBIST_MEM_9                                  0xF46E2C
 
 #endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
-
index d8e72a8e18d7aa2b288c3d8a6fb51d445b574f4e..1e7cd6e1e88831bd191d43d2310138a4c138ceb6 100644 (file)
 #define mmTPC5_CMDQ_CQ_BUF_RDATA                                     0xF4930C
 
 #endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
-
index be2e68624709554e98f5004d48a455b5786613e6..ac0d3820cd6b853890392007e037e03307162226 100644 (file)
 #define mmTPC5_QM_CQ_BUF_RDATA                                       0xF4830C
 
 #endif /* ASIC_REG_TPC5_QM_REGS_H_ */
-
index 6f301c7bbc2f6dfa7c2d6e82150ecdc2601cafca..57f83bc3b17d654fc2436dcb8af9884ab7b9814c 100644 (file)
 #define mmTPC5_RTR_NON_LIN_SCRAMB                                    0xF40604
 
 #endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
-
index 1e1168601c41e100e49fa0802aaf70373f10ca50..94e0191c06c18fc4a7619f331b06c8eb283ab0fe 100644 (file)
 #define mmTPC6_CFG_FUNC_MBIST_MEM_9                                  0xF86E2C
 
 #endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
-
index fbca6b47284ed850d88567342b985584cfcc83c6..7a1a0e87b22557e091d6b6ace71437b27b5cf888 100644 (file)
 #define mmTPC6_CMDQ_CQ_BUF_RDATA                                     0xF8930C
 
 #endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
-
index bf32465dabcb1a3be21c0574ce65c4b5218df4ae..80fa0fe0f60f4896e8e0ef34d86f10119d9275e8 100644 (file)
 #define mmTPC6_QM_CQ_BUF_RDATA                                       0xF8830C
 
 #endif /* ASIC_REG_TPC6_QM_REGS_H_ */
-
index 609bb90e10467ff6ead2e3bca016c18b7ec5f19c..d6cae8b8af669d441b4cc9ca7bcef00920db986c 100644 (file)
 #define mmTPC6_RTR_NON_LIN_SCRAMB                                    0xF80604
 
 #endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
-
index bf2fd0f73906b79d4d5c39f7e483f87df83cd66f..234147adb7796ddaa8f2ae1dc0a1f726516add04 100644 (file)
 #define mmTPC7_CFG_FUNC_MBIST_MEM_9                                  0xFC6E2C
 
 #endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
-
index 65d83043bf630c8f87e3636291c348865d052a53..4c160632fe7d7428b0ae16fa52a3b425d54a5ddd 100644 (file)
 #define mmTPC7_CMDQ_CQ_BUF_RDATA                                     0xFC930C
 
 #endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
-
index 3d5848d873043c28b5c0f46830873781fc781aee..0c13d4d167aa96d52331a18e8e826ede0820722b 100644 (file)
 #define mmTPC7_NRTR_NON_LIN_SCRAMB                                   0xFC0604
 
 #endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
-
index 25f5095f68fb902fcc6d1cc1d76bada9409d7348..cbe11425bfb0ab87a2d12f0df4a2bb28205dce46 100644 (file)
 #define mmTPC7_QM_CQ_BUF_RDATA                                       0xFC830C
 
 #endif /* ASIC_REG_TPC7_QM_REGS_H_ */
-
index 920231d0afa521040fda6263676588172b5bc016..e25e19660a9db99969c08aaf1b6ae5031b4d170a 100644 (file)
 #define mmTPC_PLL_FREQ_CALC_EN                                       0xE01440
 
 #endif /* ASIC_REG_TPC_PLL_REGS_H_ */
-
index 614149efa41200b1ef3a0fd539c2e87258d7cac6..3f02a52ba4ce9847c6b4f94141314c26f6e7896d 100644 (file)
@@ -8,10 +8,6 @@
 #ifndef GOYA_H
 #define GOYA_H
 
-#include "asic_reg/goya_regs.h"
-
-#include <linux/types.h>
-
 #define SRAM_CFG_BAR_ID                0
 #define MSIX_BAR_ID            2
 #define DDR_BAR_ID             4
index 497937a17ee9b51fd7846ce47d4f79239d896c76..bb7a1aa3279ebb468661b222f70b6e393a31c468 100644 (file)
@@ -9,7 +9,9 @@
 #define __GOYA_ASYNC_EVENTS_H_
 
 enum goya_async_event_id {
+       GOYA_ASYNC_EVENT_ID_PCIE_CORE = 32,
        GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
+       GOYA_ASYNC_EVENT_ID_PCIE_PHY = 34,
        GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
        GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
        GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
@@ -23,6 +25,8 @@ enum goya_async_event_id {
        GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
        GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
        GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
+       GOYA_ASYNC_EVENT_ID_DDR0_PARITY = 69,
+       GOYA_ASYNC_EVENT_ID_DDR1_PARITY = 72,
        GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
        GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
        GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
@@ -72,6 +76,7 @@ enum goya_async_event_id {
        GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
        GOYA_ASYNC_EVENT_ID_PLL0 = 143,
        GOYA_ASYNC_EVENT_ID_PLL1 = 144,
+       GOYA_ASYNC_EVENT_ID_PLL2 = 145,
        GOYA_ASYNC_EVENT_ID_PLL3 = 146,
        GOYA_ASYNC_EVENT_ID_PLL4 = 147,
        GOYA_ASYNC_EVENT_ID_PLL5 = 148,
@@ -81,6 +86,7 @@ enum goya_async_event_id {
        GOYA_ASYNC_EVENT_ID_PSOC = 160,
        GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
        GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
+       GOYA_ASYNC_EVENT_ID_PCIE_PERST = 173,
        GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
        GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
        GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
@@ -144,8 +150,11 @@ enum goya_async_event_id {
        GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
        GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
        GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
+       GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_3 = 333,
+       GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_4 = 334,
        GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
        GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
+       GOYA_ASYNC_EVENT_ID_FAN = 425,
        GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
        GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
        GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/misc/habanalabs/include/goya/goya_coresight.h
new file mode 100644 (file)
index 0000000..6e933c0
--- /dev/null
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_CORESIGHT_H
+#define GOYA_CORESIGHT_H
+
+enum goya_debug_stm_regs_index {
+       GOYA_STM_FIRST = 0,
+       GOYA_STM_CPU = GOYA_STM_FIRST,
+       GOYA_STM_DMA_CH_0_CS,
+       GOYA_STM_DMA_CH_1_CS,
+       GOYA_STM_DMA_CH_2_CS,
+       GOYA_STM_DMA_CH_3_CS,
+       GOYA_STM_DMA_CH_4_CS,
+       GOYA_STM_DMA_MACRO_CS,
+       GOYA_STM_MME1_SBA,
+       GOYA_STM_MME3_SBB,
+       GOYA_STM_MME4_WACS2,
+       GOYA_STM_MME4_WACS,
+       GOYA_STM_MMU_CS,
+       GOYA_STM_PCIE,
+       GOYA_STM_PSOC,
+       GOYA_STM_TPC0_EML,
+       GOYA_STM_TPC1_EML,
+       GOYA_STM_TPC2_EML,
+       GOYA_STM_TPC3_EML,
+       GOYA_STM_TPC4_EML,
+       GOYA_STM_TPC5_EML,
+       GOYA_STM_TPC6_EML,
+       GOYA_STM_TPC7_EML,
+       GOYA_STM_LAST = GOYA_STM_TPC7_EML
+};
+
+enum goya_debug_etf_regs_index {
+       GOYA_ETF_FIRST = 0,
+       GOYA_ETF_CPU_0 = GOYA_ETF_FIRST,
+       GOYA_ETF_CPU_1,
+       GOYA_ETF_CPU_TRACE,
+       GOYA_ETF_DMA_CH_0_CS,
+       GOYA_ETF_DMA_CH_1_CS,
+       GOYA_ETF_DMA_CH_2_CS,
+       GOYA_ETF_DMA_CH_3_CS,
+       GOYA_ETF_DMA_CH_4_CS,
+       GOYA_ETF_DMA_MACRO_CS,
+       GOYA_ETF_MME1_SBA,
+       GOYA_ETF_MME3_SBB,
+       GOYA_ETF_MME4_WACS2,
+       GOYA_ETF_MME4_WACS,
+       GOYA_ETF_MMU_CS,
+       GOYA_ETF_PCIE,
+       GOYA_ETF_PSOC,
+       GOYA_ETF_TPC0_EML,
+       GOYA_ETF_TPC1_EML,
+       GOYA_ETF_TPC2_EML,
+       GOYA_ETF_TPC3_EML,
+       GOYA_ETF_TPC4_EML,
+       GOYA_ETF_TPC5_EML,
+       GOYA_ETF_TPC6_EML,
+       GOYA_ETF_TPC7_EML,
+       GOYA_ETF_LAST = GOYA_ETF_TPC7_EML
+};
+
+enum goya_debug_funnel_regs_index {
+       GOYA_FUNNEL_FIRST = 0,
+       GOYA_FUNNEL_CPU = GOYA_FUNNEL_FIRST,
+       GOYA_FUNNEL_DMA_CH_6_1,
+       GOYA_FUNNEL_DMA_MACRO_3_1,
+       GOYA_FUNNEL_MME0_RTR,
+       GOYA_FUNNEL_MME1_RTR,
+       GOYA_FUNNEL_MME2_RTR,
+       GOYA_FUNNEL_MME3_RTR,
+       GOYA_FUNNEL_MME4_RTR,
+       GOYA_FUNNEL_MME5_RTR,
+       GOYA_FUNNEL_PCIE,
+       GOYA_FUNNEL_PSOC,
+       GOYA_FUNNEL_TPC0_EML,
+       GOYA_FUNNEL_TPC1_EML,
+       GOYA_FUNNEL_TPC1_RTR,
+       GOYA_FUNNEL_TPC2_EML,
+       GOYA_FUNNEL_TPC2_RTR,
+       GOYA_FUNNEL_TPC3_EML,
+       GOYA_FUNNEL_TPC3_RTR,
+       GOYA_FUNNEL_TPC4_EML,
+       GOYA_FUNNEL_TPC4_RTR,
+       GOYA_FUNNEL_TPC5_EML,
+       GOYA_FUNNEL_TPC5_RTR,
+       GOYA_FUNNEL_TPC6_EML,
+       GOYA_FUNNEL_TPC6_RTR,
+       GOYA_FUNNEL_TPC7_EML,
+       GOYA_FUNNEL_LAST = GOYA_FUNNEL_TPC7_EML
+};
+
+enum goya_debug_bmon_regs_index {
+       GOYA_BMON_FIRST = 0,
+       GOYA_BMON_CPU_RD = GOYA_BMON_FIRST,
+       GOYA_BMON_CPU_WR,
+       GOYA_BMON_DMA_CH_0_0,
+       GOYA_BMON_DMA_CH_0_1,
+       GOYA_BMON_DMA_CH_1_0,
+       GOYA_BMON_DMA_CH_1_1,
+       GOYA_BMON_DMA_CH_2_0,
+       GOYA_BMON_DMA_CH_2_1,
+       GOYA_BMON_DMA_CH_3_0,
+       GOYA_BMON_DMA_CH_3_1,
+       GOYA_BMON_DMA_CH_4_0,
+       GOYA_BMON_DMA_CH_4_1,
+       GOYA_BMON_DMA_MACRO_0,
+       GOYA_BMON_DMA_MACRO_1,
+       GOYA_BMON_DMA_MACRO_2,
+       GOYA_BMON_DMA_MACRO_3,
+       GOYA_BMON_DMA_MACRO_4,
+       GOYA_BMON_DMA_MACRO_5,
+       GOYA_BMON_DMA_MACRO_6,
+       GOYA_BMON_DMA_MACRO_7,
+       GOYA_BMON_MME1_SBA_0,
+       GOYA_BMON_MME1_SBA_1,
+       GOYA_BMON_MME3_SBB_0,
+       GOYA_BMON_MME3_SBB_1,
+       GOYA_BMON_MME4_WACS2_0,
+       GOYA_BMON_MME4_WACS2_1,
+       GOYA_BMON_MME4_WACS2_2,
+       GOYA_BMON_MME4_WACS_0,
+       GOYA_BMON_MME4_WACS_1,
+       GOYA_BMON_MME4_WACS_2,
+       GOYA_BMON_MME4_WACS_3,
+       GOYA_BMON_MME4_WACS_4,
+       GOYA_BMON_MME4_WACS_5,
+       GOYA_BMON_MME4_WACS_6,
+       GOYA_BMON_MMU_0,
+       GOYA_BMON_MMU_1,
+       GOYA_BMON_PCIE_MSTR_RD,
+       GOYA_BMON_PCIE_MSTR_WR,
+       GOYA_BMON_PCIE_SLV_RD,
+       GOYA_BMON_PCIE_SLV_WR,
+       GOYA_BMON_TPC0_EML_0,
+       GOYA_BMON_TPC0_EML_1,
+       GOYA_BMON_TPC0_EML_2,
+       GOYA_BMON_TPC0_EML_3,
+       GOYA_BMON_TPC1_EML_0,
+       GOYA_BMON_TPC1_EML_1,
+       GOYA_BMON_TPC1_EML_2,
+       GOYA_BMON_TPC1_EML_3,
+       GOYA_BMON_TPC2_EML_0,
+       GOYA_BMON_TPC2_EML_1,
+       GOYA_BMON_TPC2_EML_2,
+       GOYA_BMON_TPC2_EML_3,
+       GOYA_BMON_TPC3_EML_0,
+       GOYA_BMON_TPC3_EML_1,
+       GOYA_BMON_TPC3_EML_2,
+       GOYA_BMON_TPC3_EML_3,
+       GOYA_BMON_TPC4_EML_0,
+       GOYA_BMON_TPC4_EML_1,
+       GOYA_BMON_TPC4_EML_2,
+       GOYA_BMON_TPC4_EML_3,
+       GOYA_BMON_TPC5_EML_0,
+       GOYA_BMON_TPC5_EML_1,
+       GOYA_BMON_TPC5_EML_2,
+       GOYA_BMON_TPC5_EML_3,
+       GOYA_BMON_TPC6_EML_0,
+       GOYA_BMON_TPC6_EML_1,
+       GOYA_BMON_TPC6_EML_2,
+       GOYA_BMON_TPC6_EML_3,
+       GOYA_BMON_TPC7_EML_0,
+       GOYA_BMON_TPC7_EML_1,
+       GOYA_BMON_TPC7_EML_2,
+       GOYA_BMON_TPC7_EML_3,
+       GOYA_BMON_LAST = GOYA_BMON_TPC7_EML_3
+};
+
+enum goya_debug_spmu_regs_index {
+       GOYA_SPMU_FIRST = 0,
+       GOYA_SPMU_DMA_CH_0_CS = GOYA_SPMU_FIRST,
+       GOYA_SPMU_DMA_CH_1_CS,
+       GOYA_SPMU_DMA_CH_2_CS,
+       GOYA_SPMU_DMA_CH_3_CS,
+       GOYA_SPMU_DMA_CH_4_CS,
+       GOYA_SPMU_DMA_MACRO_CS,
+       GOYA_SPMU_MME1_SBA,
+       GOYA_SPMU_MME3_SBB,
+       GOYA_SPMU_MME4_WACS2,
+       GOYA_SPMU_MME4_WACS,
+       GOYA_SPMU_MMU_CS,
+       GOYA_SPMU_PCIE,
+       GOYA_SPMU_TPC0_EML,
+       GOYA_SPMU_TPC1_EML,
+       GOYA_SPMU_TPC2_EML,
+       GOYA_SPMU_TPC3_EML,
+       GOYA_SPMU_TPC4_EML,
+       GOYA_SPMU_TPC5_EML,
+       GOYA_SPMU_TPC6_EML,
+       GOYA_SPMU_TPC7_EML,
+       GOYA_SPMU_LAST = GOYA_SPMU_TPC7_EML
+};
+
+#endif /* GOYA_CORESIGHT_H */
index a9920cb4a07b23f48793777f4fa11bcccea6ec64..0fa80fe9f6cc34e0e180906122c9bb2e6d217dc6 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef GOYA_FW_IF_H
 #define GOYA_FW_IF_H
 
+#define GOYA_EVENT_QUEUE_MSIX_IDX      5
+
 #define CPU_BOOT_ADDR          0x7FF8040000ull
 
 #define UBOOT_FW_OFFSET                0x100000                /* 1MB in SRAM */
index 7475732b9996bc225c5cb89313c5340342a97926..4cd04c0902853312cd7c729226fe0191fe2665d9 100644 (file)
@@ -18,7 +18,8 @@ enum cpu_boot_status {
        CPU_BOOT_STATUS_IN_SPL,
        CPU_BOOT_STATUS_IN_UBOOT,
        CPU_BOOT_STATUS_DRAM_INIT_FAIL,
-       CPU_BOOT_STATUS_FIT_CORRUPTED
+       CPU_BOOT_STATUS_FIT_CORRUPTED,
+       CPU_BOOT_STATUS_UBOOT_NOT_READY,
 };
 
 enum kmd_msg {
index b680052ee3f0d61287920b2b9ea9c3c3a9fdc98f..71ea3c3e8ba33354648eb7eba1e27d4070994da1 100644 (file)
 #define PAGE_SIZE_4KB                  (_AC(1, UL) << PAGE_SHIFT_4KB)
 #define PAGE_MASK_2MB                  (~(PAGE_SIZE_2MB - 1))
 
-#define PAGE_PRESENT_MASK              0x0000000000001
-#define SWAP_OUT_MASK                  0x0000000000004
-#define LAST_MASK                      0x0000000000800
-#define PHYS_ADDR_MASK                 0x3FFFFFFFFF000ull
+#define PAGE_PRESENT_MASK              0x0000000000001ull
+#define SWAP_OUT_MASK                  0x0000000000004ull
+#define LAST_MASK                      0x0000000000800ull
+#define PHYS_ADDR_MASK                 0xFFFFFFFFFFFFF000ull
 #define HOP0_MASK                      0x3000000000000ull
 #define HOP1_MASK                      0x0FF8000000000ull
 #define HOP2_MASK                      0x0007FC0000000ull
-#define HOP3_MASK                      0x000003FE00000
-#define HOP4_MASK                      0x00000001FF000
-#define OFFSET_MASK                    0x0000000000FFF
+#define HOP3_MASK                      0x000003FE00000ull
+#define HOP4_MASK                      0x00000001FF000ull
+#define OFFSET_MASK                    0x0000000000FFFull
 
 #define HOP0_SHIFT                     48
 #define HOP1_SHIFT                     39
@@ -32,7 +32,7 @@
 #define HOP4_SHIFT                     12
 
 #define PTE_PHYS_ADDR_SHIFT            12
-#define PTE_PHYS_ADDR_MASK             ~0xFFF
+#define PTE_PHYS_ADDR_MASK             ~OFFSET_MASK
 
 #define HL_PTE_SIZE                    sizeof(u64)
 #define HOP_TABLE_SIZE                 PAGE_SIZE_4KB
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
new file mode 100644 (file)
index 0000000..d232081
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_PCI_GENERAL_H_
+#define INCLUDE_PCI_GENERAL_H_
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR         0xFF0
+#define mmPCI_CONFIG_ELBI_DATA         0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL         0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE     (1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS          0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR                (1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE       (1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK       (PCI_CONFIG_ELBI_STS_ERR | \
+                                       PCI_CONFIG_ELBI_STS_DONE)
+
+#endif /* INCLUDE_PCI_GENERAL_H_ */
index ce1fda40a8b8112572b9a26db139c8aa6de76f8e..43ef3ad8438ab6dc2d276237e95df2b17e44ec99 100644 (file)
@@ -109,7 +109,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                                                        page_size);
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
-                                       "ioctl failed to allocate page\n");
+                                       "Failed to allocate device memory (out of memory)\n");
                                rc = -ENOMEM;
                                goto page_err;
                        }
@@ -1046,10 +1046,17 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 
        mutex_lock(&ctx->mmu_lock);
 
-       for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
+       for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
                if (hl_mmu_unmap(ctx, next_vaddr, page_size))
                        dev_warn_ratelimited(hdev->dev,
-                               "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+                       "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+               /* unmapping on Palladium can be really long, so avoid a CPU
+                * soft lockup bug by sleeping a little between unmapping pages
+                */
+               if (hdev->pldm)
+                       usleep_range(500, 1000);
+       }
 
        hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
 
@@ -1083,6 +1090,64 @@ vm_type_err:
        return rc;
 }
 
+static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+{
+       struct hl_device *hdev = hpriv->hdev;
+       struct hl_ctx *ctx = hpriv->ctx;
+       u64 device_addr = 0;
+       u32 handle = 0;
+       int rc;
+
+       switch (args->in.op) {
+       case HL_MEM_OP_ALLOC:
+               if (args->in.alloc.mem_size == 0) {
+                       dev_err(hdev->dev,
+                               "alloc size must be larger than 0\n");
+                       rc = -EINVAL;
+                       goto out;
+               }
+
+               /* Force contiguous as there are no real MMU
+                * translations to overcome physical memory gaps
+                */
+               args->in.flags |= HL_MEM_CONTIGUOUS;
+               rc = alloc_device_memory(ctx, &args->in, &handle);
+
+               memset(args, 0, sizeof(*args));
+               args->out.handle = (__u64) handle;
+               break;
+
+       case HL_MEM_OP_FREE:
+               rc = free_device_memory(ctx, args->in.free.handle);
+               break;
+
+       case HL_MEM_OP_MAP:
+               if (args->in.flags & HL_MEM_USERPTR) {
+                       device_addr = args->in.map_host.host_virt_addr;
+                       rc = 0;
+               } else {
+                       rc = get_paddr_from_handle(ctx, &args->in,
+                                       &device_addr);
+               }
+
+               memset(args, 0, sizeof(*args));
+               args->out.device_virt_addr = device_addr;
+               break;
+
+       case HL_MEM_OP_UNMAP:
+               rc = 0;
+               break;
+
+       default:
+               dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+               rc = -ENOTTY;
+               break;
+       }
+
+out:
+       return rc;
+}
+
 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 {
        union hl_mem_args *args = data;
@@ -1094,104 +1159,54 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 
        if (hl_device_disabled_or_in_reset(hdev)) {
                dev_warn_ratelimited(hdev->dev,
-                       "Device is disabled or in reset. Can't execute memory IOCTL\n");
+                       "Device is %s. Can't execute MEMORY IOCTL\n",
+                       atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
                return -EBUSY;
        }
 
-       if (hdev->mmu_enable) {
-               switch (args->in.op) {
-               case HL_MEM_OP_ALLOC:
-                       if (!hdev->dram_supports_virtual_memory) {
-                               dev_err(hdev->dev,
-                                       "DRAM alloc is not supported\n");
-                               rc = -EINVAL;
-                               goto out;
-                       }
-                       if (args->in.alloc.mem_size == 0) {
-                               dev_err(hdev->dev,
-                                       "alloc size must be larger than 0\n");
-                               rc = -EINVAL;
-                               goto out;
-                       }
-                       rc = alloc_device_memory(ctx, &args->in, &handle);
-
-                       memset(args, 0, sizeof(*args));
-                       args->out.handle = (__u64) handle;
-                       break;
-
-               case HL_MEM_OP_FREE:
-                       if (!hdev->dram_supports_virtual_memory) {
-                               dev_err(hdev->dev,
-                                       "DRAM free is not supported\n");
-                               rc = -EINVAL;
-                               goto out;
-                       }
-                       rc = free_device_memory(ctx, args->in.free.handle);
-                       break;
-
-               case HL_MEM_OP_MAP:
-                       rc = map_device_va(ctx, &args->in, &device_addr);
-
-                       memset(args, 0, sizeof(*args));
-                       args->out.device_virt_addr = device_addr;
-                       break;
-
-               case HL_MEM_OP_UNMAP:
-                       rc = unmap_device_va(ctx,
-                                       args->in.unmap.device_virt_addr);
-                       break;
+       if (!hdev->mmu_enable)
+               return mem_ioctl_no_mmu(hpriv, args);
 
-               default:
-                       dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
-                       rc = -ENOTTY;
-                       break;
+       switch (args->in.op) {
+       case HL_MEM_OP_ALLOC:
+               if (!hdev->dram_supports_virtual_memory) {
+                       dev_err(hdev->dev, "DRAM alloc is not supported\n");
+                       rc = -EINVAL;
+                       goto out;
                }
-       } else {
-               switch (args->in.op) {
-               case HL_MEM_OP_ALLOC:
-                       if (args->in.alloc.mem_size == 0) {
-                               dev_err(hdev->dev,
-                                       "alloc size must be larger than 0\n");
-                               rc = -EINVAL;
-                               goto out;
-                       }
 
-                       /* Force contiguous as there are no real MMU
-                        * translations to overcome physical memory gaps
-                        */
-                       args->in.flags |= HL_MEM_CONTIGUOUS;
-                       rc = alloc_device_memory(ctx, &args->in, &handle);
+               if (args->in.alloc.mem_size == 0) {
+                       dev_err(hdev->dev,
+                               "alloc size must be larger than 0\n");
+                       rc = -EINVAL;
+                       goto out;
+               }
+               rc = alloc_device_memory(ctx, &args->in, &handle);
 
-                       memset(args, 0, sizeof(*args));
-                       args->out.handle = (__u64) handle;
-                       break;
+               memset(args, 0, sizeof(*args));
+               args->out.handle = (__u64) handle;
+               break;
 
-               case HL_MEM_OP_FREE:
-                       rc = free_device_memory(ctx, args->in.free.handle);
-                       break;
+       case HL_MEM_OP_FREE:
+               rc = free_device_memory(ctx, args->in.free.handle);
+               break;
 
-               case HL_MEM_OP_MAP:
-                       if (args->in.flags & HL_MEM_USERPTR) {
-                               device_addr = args->in.map_host.host_virt_addr;
-                               rc = 0;
-                       } else {
-                               rc = get_paddr_from_handle(ctx, &args->in,
-                                               &device_addr);
-                       }
+       case HL_MEM_OP_MAP:
+               rc = map_device_va(ctx, &args->in, &device_addr);
 
-                       memset(args, 0, sizeof(*args));
-                       args->out.device_virt_addr = device_addr;
-                       break;
+               memset(args, 0, sizeof(*args));
+               args->out.device_virt_addr = device_addr;
+               break;
 
-               case HL_MEM_OP_UNMAP:
-                       rc = 0;
-                       break;
+       case HL_MEM_OP_UNMAP:
+               rc = unmap_device_va(ctx,
+                               args->in.unmap.device_virt_addr);
+               break;
 
-               default:
-                       dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
-                       rc = -ENOTTY;
-                       break;
-               }
+       default:
+               dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+               rc = -ENOTTY;
+               break;
        }
 
 out:
index 3a5a2cec83051b08c1b838372aaf29c0f1b99e13..533d9315b6fb968b16fac5e9a0fbbc9847d83206 100644 (file)
 #include <linux/genalloc.h>
 #include <linux/slab.h>
 
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
 {
        struct pgt_info *pgt_info = NULL;
 
-       hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
-                               (unsigned long) addr)
-               if (addr == pgt_info->addr)
+       hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+                               (unsigned long) hop_addr)
+               if (hop_addr == pgt_info->shadow_addr)
                        break;
 
        return pgt_info;
@@ -25,45 +27,109 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
 
 static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
 {
+       struct hl_device *hdev = ctx->hdev;
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
 
-       gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
-                       ctx->hdev->asic_prop.mmu_hop_table_size);
+       gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
+                       hdev->asic_prop.mmu_hop_table_size);
        hash_del(&pgt_info->node);
-
+       kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
        kfree(pgt_info);
 }
 
 static u64 alloc_hop(struct hl_ctx *ctx)
 {
        struct hl_device *hdev = ctx->hdev;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pgt_info *pgt_info;
-       u64 addr;
+       u64 phys_addr, shadow_addr;
 
        pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
        if (!pgt_info)
                return ULLONG_MAX;
 
-       addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
-                       hdev->asic_prop.mmu_hop_table_size);
-       if (!addr) {
+       phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
+                                       prop->mmu_hop_table_size);
+       if (!phys_addr) {
                dev_err(hdev->dev, "failed to allocate page\n");
-               kfree(pgt_info);
-               return ULLONG_MAX;
+               goto pool_add_err;
        }
 
-       pgt_info->addr = addr;
+       shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
+                                               GFP_KERNEL);
+       if (!shadow_addr)
+               goto shadow_err;
+
+       pgt_info->phys_addr = phys_addr;
+       pgt_info->shadow_addr = shadow_addr;
        pgt_info->ctx = ctx;
        pgt_info->num_of_ptes = 0;
-       hash_add(ctx->mmu_hash, &pgt_info->node, addr);
+       hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+       return shadow_addr;
+
+shadow_err:
+       gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
+pool_add_err:
+       kfree(pgt_info);
+
+       return ULLONG_MAX;
+}
+
+static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+       return ctx->hdev->asic_prop.mmu_pgt_addr +
+                       (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+       return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
+                       (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline void flush(struct hl_ctx *ctx)
+{
+       /* flush all writes from all cores to reach PCI */
+       mb();
+       ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
+}
+
+/* transform the value to physical address when writing to H/W */
+static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+       /*
+        * The value to write is actually the address of the next shadow hop +
+        * flags at the 12 LSBs.
+        * Hence in order to get the value to write to the physical PTE, we
+        * clear the 12 LSBs and translate the shadow hop to its associated
+        * physical hop, and add back the original 12 LSBs.
+        */
+       u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
+                               (val & OFFSET_MASK);
+
+       ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+                                       get_phys_addr(ctx, shadow_pte_addr),
+                                       phys_val);
+
+       *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
 
-       return addr;
+/* do not transform the value to physical address when writing to H/W */
+static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
+                                       u64 val)
+{
+       ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+                                       get_phys_addr(ctx, shadow_pte_addr),
+                                       val);
+       *(u64 *) (uintptr_t) shadow_pte_addr = val;
 }
 
-static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
+/* clear the last and present bits */
+static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
 {
-       /* clear the last and present bits */
-       hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
+       /* no need to transform the value to physical address */
+       write_final_pte(ctx, pte_addr, 0);
 }
 
 static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
@@ -98,12 +164,6 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
        return num_of_ptes_left;
 }
 
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
-       return ctx->hdev->asic_prop.mmu_pgt_addr +
-                       (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
 static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
                                        u64 virt_addr, u64 mask, u64 shift)
 {
@@ -136,7 +196,7 @@ static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
 }
 
-static inline u64 get_next_hop_addr(u64 curr_pte)
+static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
 {
        if (curr_pte & PAGE_PRESENT_MASK)
                return curr_pte & PHYS_ADDR_MASK;
@@ -147,7 +207,7 @@ static inline u64 get_next_hop_addr(u64 curr_pte)
 static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
                                                bool *is_new_hop)
 {
-       u64 hop_addr = get_next_hop_addr(curr_pte);
+       u64 hop_addr = get_next_hop_addr(ctx, curr_pte);
 
        if (hop_addr == ULLONG_MAX) {
                hop_addr = alloc_hop(ctx);
@@ -157,106 +217,30 @@ static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
        return hop_addr;
 }
 
-/*
- * hl_mmu_init - init the mmu module
- *
- * @hdev: pointer to the habanalabs device structure
- *
- * This function does the following:
- * - Allocate max_asid zeroed hop0 pgts so no mapping is available
- * - Enable mmu in hw
- * - Invalidate the mmu cache
- * - Create a pool of pages for pgts
- * - Returns 0 on success
- *
- * This function depends on DMA QMAN to be working!
- */
-int hl_mmu_init(struct hl_device *hdev)
+/* translates shadow address inside hop to a physical address */
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
 {
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       int rc;
+       u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
+       u64 shadow_hop_addr = shadow_addr & ~page_mask;
+       u64 pte_offset = shadow_addr & page_mask;
+       u64 phys_hop_addr;
 
-       if (!hdev->mmu_enable)
-               return 0;
-
-       /* MMU HW init was already done in device hw_init() */
-
-       mutex_init(&hdev->mmu_cache_lock);
-
-       hdev->mmu_pgt_pool =
-                       gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
-       if (!hdev->mmu_pgt_pool) {
-               dev_err(hdev->dev, "Failed to create page gen pool\n");
-               rc = -ENOMEM;
-               goto err_pool_create;
-       }
-
-       rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
-                       prop->mmu_hop0_tables_total_size,
-                       prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
-                       -1);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
-               goto err_pool_add;
-       }
-
-       return 0;
-
-err_pool_add:
-       gen_pool_destroy(hdev->mmu_pgt_pool);
-err_pool_create:
-       mutex_destroy(&hdev->mmu_cache_lock);
+       if (shadow_hop_addr != get_hop0_addr(ctx))
+               phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+       else
+               phys_hop_addr = get_phys_hop0_addr(ctx);
 
-       return rc;
+       return phys_hop_addr + pte_offset;
 }
 
-/*
- * hl_mmu_fini - release the mmu module.
- *
- * @hdev: pointer to the habanalabs device structure
- *
- * This function does the following:
- * - Disable mmu in hw
- * - free the pgts pool
- *
- * All ctxs should be freed before calling this func
- */
-void hl_mmu_fini(struct hl_device *hdev)
-{
-       if (!hdev->mmu_enable)
-               return;
-
-       gen_pool_destroy(hdev->mmu_pgt_pool);
-
-       mutex_destroy(&hdev->mmu_cache_lock);
-
-       /* MMU HW fini will be done in device hw_fini() */
-}
-
-/**
- * hl_mmu_ctx_init() - initialize a context for using the MMU module.
- * @ctx: pointer to the context structure to initialize.
- *
- * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
- * page tables hops related to this context and an optional DRAM default page
- * mapping.
- * Return: 0 on success, non-zero otherwise.
- */
-int hl_mmu_ctx_init(struct hl_ctx *ctx)
+static int dram_default_mapping_init(struct hl_ctx *ctx)
 {
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
-               hop3_pte_addr, pte_val;
+       u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+               hop2_pte_addr, hop3_pte_addr, pte_val;
        int rc, i, j, hop3_allocated = 0;
 
-       if (!hdev->mmu_enable)
-               return 0;
-
-       mutex_init(&ctx->mmu_lock);
-       hash_init(ctx->mmu_hash);
-
        if (!hdev->dram_supports_virtual_memory ||
                        !hdev->dram_default_page_mapping)
                return 0;
@@ -269,10 +253,10 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
        total_hops = num_of_hop3 + 2;
 
        ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops,  GFP_KERNEL);
-       if (!ctx->dram_default_hops) {
-               rc = -ENOMEM;
-               goto alloc_err;
-       }
+       if (!ctx->dram_default_hops)
+               return -ENOMEM;
+
+       hop0_addr = get_hop0_addr(ctx);
 
        hop1_addr = alloc_hop(ctx);
        if (hop1_addr == ULLONG_MAX) {
@@ -304,17 +288,17 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
 
        /* need only pte 0 in hops 0 and 1 */
        pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-       hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);
+       write_pte(ctx, hop0_addr, pte_val);
 
        pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-       hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
+       write_pte(ctx, hop1_addr, pte_val);
        get_pte(ctx, hop1_addr);
 
        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
-               hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
+               write_pte(ctx, hop2_pte_addr, pte_val);
                get_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }
@@ -325,33 +309,183 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
-                       hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
-                                       pte_val);
+                       write_final_pte(ctx, hop3_pte_addr, pte_val);
                        get_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }
 
-       /* flush all writes to reach PCI */
-       mb();
-       hdev->asic_funcs->read_pte(hdev, hop2_addr);
+       flush(ctx);
 
        return 0;
 
 hop3_err:
        for (i = 0 ; i < hop3_allocated ; i++)
                free_hop(ctx, ctx->dram_default_hops[i]);
+
        free_hop(ctx, hop2_addr);
 hop2_err:
        free_hop(ctx, hop1_addr);
 hop1_err:
        kfree(ctx->dram_default_hops);
-alloc_err:
-       mutex_destroy(&ctx->mmu_lock);
 
        return rc;
 }
 
+static void dram_default_mapping_fini(struct hl_ctx *ctx)
+{
+       struct hl_device *hdev = ctx->hdev;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+               hop2_pte_addr, hop3_pte_addr;
+       int i, j;
+
+       if (!hdev->dram_supports_virtual_memory ||
+                       !hdev->dram_default_page_mapping)
+               return;
+
+       num_of_hop3 = prop->dram_size_for_default_page_mapping;
+       do_div(num_of_hop3, prop->dram_page_size);
+       do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+       hop0_addr = get_hop0_addr(ctx);
+       /* add hop1 and hop2 */
+       total_hops = num_of_hop3 + 2;
+       hop1_addr = ctx->dram_default_hops[total_hops - 1];
+       hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+       for (i = 0 ; i < num_of_hop3 ; i++) {
+               hop3_pte_addr = ctx->dram_default_hops[i];
+               for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+                       clear_pte(ctx, hop3_pte_addr);
+                       put_pte(ctx, ctx->dram_default_hops[i]);
+                       hop3_pte_addr += HL_PTE_SIZE;
+               }
+       }
+
+       /* walk hop2's PTEs, dropping the per-hop3-table references */
+       hop2_pte_addr = hop2_addr;
+       for (i = 0 ; i < num_of_hop3 ; i++) {
+               clear_pte(ctx, hop2_pte_addr);
+               put_pte(ctx, hop2_addr);
+               hop2_pte_addr += HL_PTE_SIZE;
+       }
+
+       clear_pte(ctx, hop1_addr);
+       put_pte(ctx, hop1_addr);
+       clear_pte(ctx, hop0_addr);
+
+       kfree(ctx->dram_default_hops);
+
+       flush(ctx);
+}
+
+/**
+ * hl_mmu_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Allocate max_asid zeroed hop0 pgts so no mapping is available.
+ * - Enable MMU in H/W.
+ * - Invalidate the MMU cache.
+ * - Create a pool of pages for pgt_infos.
+ *
+ * This function depends on DMA QMAN to be working!
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int hl_mmu_init(struct hl_device *hdev)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       int rc;
+
+       if (!hdev->mmu_enable)
+               return 0;
+
+       /* MMU H/W init was already done in device hw_init() */
+
+       mutex_init(&hdev->mmu_cache_lock);
+
+       hdev->mmu_pgt_pool =
+                       gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+       if (!hdev->mmu_pgt_pool) {
+               dev_err(hdev->dev, "Failed to create page gen pool\n");
+               rc = -ENOMEM;
+               goto err_pool_create;
+       }
+
+       rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
+                       prop->mmu_hop0_tables_total_size,
+                       prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+                       -1);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+               goto err_pool_add;
+       }
+
+       hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
+                                       prop->mmu_hop_table_size,
+                                       GFP_KERNEL | __GFP_ZERO);
+       if (!hdev->mmu_shadow_hop0) {
+               rc = -ENOMEM;
+               goto err_pool_add;
+       }
+
+       return 0;
+
+err_pool_add:
+       gen_pool_destroy(hdev->mmu_pgt_pool);
+err_pool_create:
+       mutex_destroy(&hdev->mmu_cache_lock);
+
+       return rc;
+}
+
+/**
+ * hl_mmu_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+void hl_mmu_fini(struct hl_device *hdev)
+{
+       if (!hdev->mmu_enable)
+               return;
+
+       kvfree(hdev->mmu_shadow_hop0);
+       gen_pool_destroy(hdev->mmu_pgt_pool);
+       mutex_destroy(&hdev->mmu_cache_lock);
+
+       /* MMU H/W fini will be done in device hw_fini() */
+}
+
+/**
+ * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+int hl_mmu_ctx_init(struct hl_ctx *ctx)
+{
+       struct hl_device *hdev = ctx->hdev;
+
+       if (!hdev->mmu_enable)
+               return 0;
+
+       mutex_init(&ctx->mmu_lock);
+       hash_init(ctx->mmu_phys_hash);
+       hash_init(ctx->mmu_shadow_hash);
+
+       return dram_default_mapping_init(ctx);
+}
+
 /*
  * hl_mmu_ctx_fini - disable a ctx from using the mmu module
  *
@@ -365,63 +499,23 @@ alloc_err:
 void hl_mmu_ctx_fini(struct hl_ctx *ctx)
 {
        struct hl_device *hdev = ctx->hdev;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pgt_info *pgt_info;
        struct hlist_node *tmp;
-       u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
-               hop3_pte_addr;
-       int i, j;
+       int i;
 
-       if (!ctx->hdev->mmu_enable)
+       if (!hdev->mmu_enable)
                return;
 
-       if (hdev->dram_supports_virtual_memory &&
-                       hdev->dram_default_page_mapping) {
-
-               num_of_hop3 = prop->dram_size_for_default_page_mapping;
-               do_div(num_of_hop3, prop->dram_page_size);
-               do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
-
-               /* add hop1 and hop2 */
-               total_hops = num_of_hop3 + 2;
-               hop1_addr = ctx->dram_default_hops[total_hops - 1];
-               hop2_addr = ctx->dram_default_hops[total_hops - 2];
-
-               for (i = 0 ; i < num_of_hop3 ; i++) {
-                       hop3_pte_addr = ctx->dram_default_hops[i];
-                       for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
-                               clear_pte(hdev, hop3_pte_addr);
-                               put_pte(ctx, ctx->dram_default_hops[i]);
-                               hop3_pte_addr += HL_PTE_SIZE;
-                       }
-               }
+       dram_default_mapping_fini(ctx);
 
-               hop2_pte_addr = hop2_addr;
-               for (i = 0 ; i < num_of_hop3 ; i++) {
-                       clear_pte(hdev, hop2_pte_addr);
-                       put_pte(ctx, hop2_addr);
-                       hop2_pte_addr += HL_PTE_SIZE;
-               }
-
-               clear_pte(hdev, hop1_addr);
-               put_pte(ctx, hop1_addr);
-               clear_pte(hdev, get_hop0_addr(ctx));
-
-               kfree(ctx->dram_default_hops);
-
-               /* flush all writes to reach PCI */
-               mb();
-               hdev->asic_funcs->read_pte(hdev, hop2_addr);
-       }
-
-       if (!hash_empty(ctx->mmu_hash))
+       if (!hash_empty(ctx->mmu_shadow_hash))
                dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
 
-       hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
+       hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
                dev_err(hdev->dev,
                        "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
-                       pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
-               free_hop(ctx, pgt_info->addr);
+                       pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+               free_hop(ctx, pgt_info->shadow_addr);
        }
 
        mutex_destroy(&ctx->mmu_lock);
@@ -437,45 +531,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
                hop3_addr = 0, hop3_pte_addr = 0,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte;
-       int clear_hop3 = 1;
-       bool is_dram_addr, is_huge, is_dram_default_page_mapping;
+       bool is_dram_addr, is_huge, clear_hop3 = true;
 
        is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
                                prop->va_space_dram_start_address,
                                prop->va_space_dram_end_address);
 
        hop0_addr = get_hop0_addr(ctx);
-
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
 
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
 
-       hop1_addr = get_next_hop_addr(curr_pte);
+       hop1_addr = get_next_hop_addr(ctx, curr_pte);
 
        if (hop1_addr == ULLONG_MAX)
                goto not_mapped;
 
        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
 
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
 
-       hop2_addr = get_next_hop_addr(curr_pte);
+       hop2_addr = get_next_hop_addr(ctx, curr_pte);
 
        if (hop2_addr == ULLONG_MAX)
                goto not_mapped;
 
        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
 
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
 
-       hop3_addr = get_next_hop_addr(curr_pte);
+       hop3_addr = get_next_hop_addr(ctx, curr_pte);
 
        if (hop3_addr == ULLONG_MAX)
                goto not_mapped;
 
        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
 
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
 
        is_huge = curr_pte & LAST_MASK;
 
@@ -485,27 +577,24 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
                return -EFAULT;
        }
 
-       is_dram_default_page_mapping =
-                       hdev->dram_default_page_mapping && is_dram_addr;
-
        if (!is_huge) {
-               hop4_addr = get_next_hop_addr(curr_pte);
+               hop4_addr = get_next_hop_addr(ctx, curr_pte);
 
                if (hop4_addr == ULLONG_MAX)
                        goto not_mapped;
 
                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
 
-               curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+               curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
 
-               clear_hop3 = 0;
+               clear_hop3 = false;
        }
 
-       if (is_dram_default_page_mapping) {
-               u64 zero_pte = (prop->mmu_dram_default_page_addr &
+       if (hdev->dram_default_page_mapping && is_dram_addr) {
+               u64 default_pte = (prop->mmu_dram_default_page_addr &
                                PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                        PAGE_PRESENT_MASK;
-               if (curr_pte == zero_pte) {
+               if (curr_pte == default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
                                        virt_addr);
@@ -519,40 +608,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
                        goto not_mapped;
                }
 
-               hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
+               write_final_pte(ctx, hop3_pte_addr, default_pte);
                put_pte(ctx, hop3_addr);
        } else {
                if (!(curr_pte & PAGE_PRESENT_MASK))
                        goto not_mapped;
 
-               clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+               if (hop4_addr)
+                       clear_pte(ctx, hop4_pte_addr);
+               else
+                       clear_pte(ctx, hop3_pte_addr);
 
                if (hop4_addr && !put_pte(ctx, hop4_addr))
-                       clear_hop3 = 1;
+                       clear_hop3 = true;
 
                if (!clear_hop3)
                        goto flush;
-               clear_pte(hdev, hop3_pte_addr);
+
+               clear_pte(ctx, hop3_pte_addr);
 
                if (put_pte(ctx, hop3_addr))
                        goto flush;
-               clear_pte(hdev, hop2_pte_addr);
+
+               clear_pte(ctx, hop2_pte_addr);
 
                if (put_pte(ctx, hop2_addr))
                        goto flush;
-               clear_pte(hdev, hop1_pte_addr);
+
+               clear_pte(ctx, hop1_pte_addr);
 
                if (put_pte(ctx, hop1_addr))
                        goto flush;
-               clear_pte(hdev, hop0_pte_addr);
+
+               clear_pte(ctx, hop0_pte_addr);
        }
 
 flush:
-       /* flush all writes from all cores to reach PCI */
-       mb();
-
-       hdev->asic_funcs->read_pte(hdev,
-                               hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+       flush(ctx);
 
        return 0;
 
@@ -632,8 +724,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte = 0;
        bool hop1_new = false, hop2_new = false, hop3_new = false,
-               hop4_new = false, is_huge, is_dram_addr,
-               is_dram_default_page_mapping;
+               hop4_new = false, is_huge, is_dram_addr;
        int rc = -ENOMEM;
 
        /*
@@ -654,59 +745,46 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                return -EFAULT;
        }
 
-       is_dram_default_page_mapping =
-                       hdev->dram_default_page_mapping && is_dram_addr;
-
        hop0_addr = get_hop0_addr(ctx);
-
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
-
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
 
        hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
-
        if (hop1_addr == ULLONG_MAX)
                goto err;
 
        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
-
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
 
        hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
-
        if (hop2_addr == ULLONG_MAX)
                goto err;
 
        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
-
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
 
        hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
-
        if (hop3_addr == ULLONG_MAX)
                goto err;
 
        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
-
-       curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+       curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
 
        if (!is_huge) {
                hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
-
                if (hop4_addr == ULLONG_MAX)
                        goto err;
 
                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
-
-               curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+               curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
        }
 
-       if (is_dram_default_page_mapping) {
-               u64 zero_pte = (prop->mmu_dram_default_page_addr &
+       if (hdev->dram_default_page_mapping && is_dram_addr) {
+               u64 default_pte = (prop->mmu_dram_default_page_addr &
                                        PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                                PAGE_PRESENT_MASK;
 
-               if (curr_pte != zero_pte) {
+               if (curr_pte != default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: mapping already exists for virt_addr 0x%llx\n",
                                        virt_addr);
@@ -722,27 +800,22 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                }
        } else if (curr_pte & PAGE_PRESENT_MASK) {
                dev_err(hdev->dev,
-                               "mapping already exists for virt_addr 0x%llx\n",
-                                       virt_addr);
+                       "mapping already exists for virt_addr 0x%llx\n",
+                               virt_addr);
 
                dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
-                               hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
-                               hop0_pte_addr);
+                       *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
                dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
-                               hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
-                               hop1_pte_addr);
+                       *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
                dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
-                               hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
-                               hop2_pte_addr);
+                       *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
                dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
-                               hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
-                               hop3_pte_addr);
+                       *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
 
                if (!is_huge)
                        dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
-                               hdev->asic_funcs->read_pte(hdev,
-                                                       hop4_pte_addr),
-                                                       hop4_pte_addr);
+                               *(u64 *) (uintptr_t) hop4_pte_addr,
+                               hop4_pte_addr);
 
                rc = -EINVAL;
                goto err;
@@ -751,28 +824,26 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
                        | PAGE_PRESENT_MASK;
 
-       hdev->asic_funcs->write_pte(hdev,
-                               is_huge ? hop3_pte_addr : hop4_pte_addr,
-                               curr_pte);
+       if (is_huge)
+               write_final_pte(ctx, hop3_pte_addr, curr_pte);
+       else
+               write_final_pte(ctx, hop4_pte_addr, curr_pte);
 
        if (hop1_new) {
-               curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
-                               PAGE_PRESENT_MASK;
-               ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
-                               curr_pte);
+               curr_pte =
+                       (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+               write_pte(ctx, hop0_pte_addr, curr_pte);
        }
        if (hop2_new) {
-               curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
-                               PAGE_PRESENT_MASK;
-               ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
-                               curr_pte);
+               curr_pte =
+                       (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+               write_pte(ctx, hop1_pte_addr, curr_pte);
                get_pte(ctx, hop1_addr);
        }
        if (hop3_new) {
-               curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
-                               PAGE_PRESENT_MASK;
-               ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
-                               curr_pte);
+               curr_pte =
+                       (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+               write_pte(ctx, hop2_pte_addr, curr_pte);
                get_pte(ctx, hop2_addr);
        }
 
@@ -780,8 +851,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                if (hop4_new) {
                        curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
                                        PAGE_PRESENT_MASK;
-                       ctx->hdev->asic_funcs->write_pte(ctx->hdev,
-                                       hop3_pte_addr, curr_pte);
+                       write_pte(ctx, hop3_pte_addr, curr_pte);
                        get_pte(ctx, hop3_addr);
                }
 
@@ -790,11 +860,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                get_pte(ctx, hop3_addr);
        }
 
-       /* flush all writes from all cores to reach PCI */
-       mb();
-
-       hdev->asic_funcs->read_pte(hdev,
-                               is_huge ? hop3_pte_addr : hop4_pte_addr);
+       flush(ctx);
 
        return 0;
 
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
new file mode 100644 (file)
index 0000000..d472d02
--- /dev/null
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/pci/pci_general.h"
+
+#include <linux/pci.h>
+
+/**
+ * hl_pci_bars_map() - Map PCI BARs.
+ * @hdev: Pointer to hl_device structure.
+ * @name: Array of BAR names.
+ * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
+ *
+ * Request PCI regions and map them to kernel virtual addresses.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
+                       bool is_wc[3])
+{
+       struct pci_dev *pdev = hdev->pdev;
+       int rc, i, bar;
+
+       rc = pci_request_regions(pdev, HL_NAME);
+       if (rc) {
+               dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+               return rc;
+       }
+
+       for (i = 0 ; i < 3 ; i++) {
+               bar = i * 2; /* 64-bit BARs */
+               hdev->pcie_bar[bar] = is_wc[i] ?
+                               pci_ioremap_wc_bar(pdev, bar) :
+                               pci_ioremap_bar(pdev, bar);
+               if (!hdev->pcie_bar[bar]) {
+                       dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
+                                       is_wc[i] ? "_wc" : "", name[i]);
+                       rc = -ENODEV;
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i = 2 ; i >= 0 ; i--) {
+               bar = i * 2; /* 64-bit BARs */
+               if (hdev->pcie_bar[bar])
+                       iounmap(hdev->pcie_bar[bar]);
+       }
+
+       pci_release_regions(pdev);
+
+       return rc;
+}
+
+/*
+ * hl_pci_bars_unmap() - Unmap PCI BARS.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Release all PCI BARs and unmap their virtual addresses.
+ */
+static void hl_pci_bars_unmap(struct hl_device *hdev)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       int i, bar;
+
+       for (i = 2 ; i >= 0 ; i--) {
+               bar = i * 2; /* 64-bit BARs */
+               iounmap(hdev->pcie_bar[bar]);
+       }
+
+       pci_release_regions(pdev);
+}
+
+/*
+ * hl_pci_elbi_write() - Write through the ELBI interface.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       ktime_t timeout;
+       u32 val;
+
+       /* Clear previous status */
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
+                               PCI_CONFIG_ELBI_CTRL_WRITE);
+
+       timeout = ktime_add_ms(ktime_get(), 10);
+       for (;;) {
+               pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+               if (val & PCI_CONFIG_ELBI_STS_MASK)
+                       break;
+               if (ktime_compare(ktime_get(), timeout) > 0) {
+                       pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+                                               &val);
+                       break;
+               }
+
+               usleep_range(300, 500);
+       }
+
+       if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+               return 0;
+
+       if (val & PCI_CONFIG_ELBI_STS_ERR) {
+               dev_err(hdev->dev, "Error writing to ELBI\n");
+               return -EIO;
+       }
+
+       if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+               dev_err(hdev->dev, "ELBI write didn't finish in time\n");
+               return -EIO;
+       }
+
+       dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
+       return -EIO;
+}
+
+/**
+ * hl_pci_iatu_write() - iatu write routine.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u32 dbi_offset;
+       int rc;
+
+       dbi_offset = addr & 0xFFF;
+
+       rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+       rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+                               data);
+
+       if (rc)
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * hl_pci_reset_link_through_bridge() - Reset PCI link.
+ * @hdev: Pointer to hl_device structure.
+ */
+static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       struct pci_dev *parent_port;
+       u16 val;
+
+       parent_port = pdev->bus->self;
+       pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
+       val |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+       ssleep(1);
+
+       val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
+       pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+       ssleep(3);
+}
+
+/**
+ * hl_pci_set_dram_bar_base() - Set DDR BAR to map specific device address.
+ * @hdev: Pointer to hl_device structure.
+ * @inbound_region: Inbound region number.
+ * @bar: PCI BAR number.
+ * @addr: Address in DRAM. Must be aligned to DRAM bar size.
+ *
+ * Configure the iATU so that the DRAM bar will start at the specified address.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
+                               u64 addr)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u32 offset;
+       int rc;
+
+       switch (inbound_region) {
+       case 0:
+               offset = 0x100;
+               break;
+       case 1:
+               offset = 0x300;
+               break;
+       case 2:
+               offset = 0x500;
+               break;
+       default:
+               dev_err(hdev->dev, "Invalid inbound region %d\n",
+                       inbound_region);
+               return -EINVAL;
+       }
+
+       if (bar != 0 && bar != 2 && bar != 4) {
+               dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
+               return -EINVAL;
+       }
+
+       /* Point to the specified address */
+       rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
+       rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
+       rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
+       /* Enable + BAR match + match enable + BAR number */
+       rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
+
+       /* Return the DBI window to the default location */
+       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+       if (rc)
+               dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
+                       addr);
+
+       return rc;
+}
+
+/**
+ * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
+ * @hdev: Pointer to hl_device structure.
+ * @sram_base_address: SRAM base address.
+ * @dram_base_address: DRAM base address.
+ * @host_phys_size: Size of host memory for device transactions.
+ *
+ * This is needed in case the firmware doesn't initialize the iATU.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
+                       u64 dram_base_address, u64 host_phys_size)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 host_phys_end_addr;
+       int rc = 0;
+
+       /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
+       rc  = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
+       rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
+       rc |= hl_pci_iatu_write(hdev, 0x100, 0);
+       /* Enable + Bar match + match enable */
+       rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
+
+       /* Point to DRAM */
+       if (!hdev->asic_funcs->set_dram_bar_base)
+               return -EINVAL;
+       rc |= hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address);
+
+       /* Outbound Region 0 - Point to Host */
+       host_phys_end_addr = prop->host_phys_base_address + host_phys_size - 1;
+       rc |= hl_pci_iatu_write(hdev, 0x008,
+                               lower_32_bits(prop->host_phys_base_address));
+       rc |= hl_pci_iatu_write(hdev, 0x00C,
+                               upper_32_bits(prop->host_phys_base_address));
+       rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
+       rc |= hl_pci_iatu_write(hdev, 0x014, 0);
+       rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+       rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
+       /* Increase region size */
+       rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
+       /* Enable */
+       rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
+
+       /* Return the DBI window to the default location */
+       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+       if (rc)
+               return -EIO;
+
+       return 0;
+}
+
+/**
+ * hl_pci_set_dma_mask() - Set DMA masks for the device.
+ * @hdev: Pointer to hl_device structure.
+ * @dma_mask: number of bits for the requested dma mask.
+ *
+ * This function sets the DMA masks (regular and consistent) for a specified
+ * value. If it doesn't succeed, it tries to set it to a fall-back value
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       int rc;
+
+       /* set DMA mask */
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+       if (rc) {
+               dev_warn(hdev->dev,
+                       "Failed to set pci dma mask to %d bits, error %d\n",
+                       dma_mask, rc);
+
+               dma_mask = hdev->dma_mask;
+
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+               if (rc) {
+                       dev_err(hdev->dev,
+                               "Failed to set pci dma mask to %d bits, error %d\n",
+                               dma_mask, rc);
+                       return rc;
+               }
+       }
+
+       /*
+        * We managed to set the dma mask, so update the dma mask field. If
+        * the set to the coherent mask will fail with that mask, we will
+        * fail the entire function
+        */
+       hdev->dma_mask = dma_mask;
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed to set pci consistent dma mask to %d bits, error %d\n",
+                       dma_mask, rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/**
+ * hl_pci_init() - PCI initialization code.
+ * @hdev: Pointer to hl_device structure.
+ * @dma_mask: number of bits for the requested dma mask.
+ *
+ * Set DMA masks, initialize the PCI controller and map the PCI BARs.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       int rc;
+
+       rc = hl_pci_set_dma_mask(hdev, dma_mask);
+       if (rc)
+               return rc;
+
+       if (hdev->reset_pcilink)
+               hl_pci_reset_link_through_bridge(hdev);
+
+       rc = pci_enable_device_mem(pdev);
+       if (rc) {
+               dev_err(hdev->dev, "can't enable PCI device\n");
+               return rc;
+       }
+
+       pci_set_master(pdev);
+
+       rc = hdev->asic_funcs->init_iatu(hdev);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to initialize iATU\n");
+               goto disable_device;
+       }
+
+       rc = hdev->asic_funcs->pci_bars_map(hdev);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+               goto disable_device;
+       }
+
+       return 0;
+
+disable_device:
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+
+       return rc;
+}
+
+/**
+ * hl_pci_fini() - PCI finalization code.
+ * @hdev: Pointer to hl_device structure
+ *
+ * Unmap PCI bars and disable PCI device.
+ */
+void hl_pci_fini(struct hl_device *hdev)
+{
+       hl_pci_bars_unmap(hdev);
+
+       pci_clear_master(hdev->pdev);
+       pci_disable_device(hdev->pdev);
+}
index 74e2c667dce0c0fc9747a6a3474d9f81cc06ad8e..9d7b3719bfa013485e0a72a6594fc9956d28f6f4 100644 (file)
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 config INTEL_MEI
        tristate "Intel Management Engine Interface"
        depends on X86 && PCI
@@ -44,12 +46,4 @@ config INTEL_MEI_TXE
          Supported SoCs:
          Intel Bay Trail
 
-config INTEL_MEI_HDCP
-       tristate "Intel HDCP2.2 services of ME Interface"
-       select INTEL_MEI_ME
-       depends on DRM_I915
-       help
-         MEI Support for HDCP2.2 Services on Intel platforms.
-
-         Enables the ME FW services required for HDCP2.2 support through
-         I915 display driver of Intel.
+source "drivers/misc/mei/hdcp/Kconfig"
index 8c2d9565a4cb30a094c6d8ba35c2005b8adbdd61..f1c76f7ee8042ad792de183ae74cd4204d9a1be7 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 #
+# Copyright (c) 2010-2019, Intel Corporation. All rights reserved.
 # Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
-# Copyright (c) 2010-2014, Intel Corporation.
 #
 obj-$(CONFIG_INTEL_MEI) += mei.o
 mei-objs := init.o
index 5fcac02233af9d961444b79cea043a49cdb6871d..32e9b1aed2ca5570511fca78d6fcc93aa9abaca0 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/kernel.h>
index 65bec998eb6ecd8a52e9b3e5fa9e860f7c9e55e4..985bd4fd33281171836edfeda621cf0f5e7f954c 100644 (file)
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2012-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/module.h>
index ca4c9cc218a22b9644957f04efa0d28142aab41c..1e3edbbacb1e29d35babd37dcdfb8867ef4c1c26 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/sched/signal.h>
@@ -679,7 +669,7 @@ int mei_cl_unlink(struct mei_cl *cl)
 
 void mei_host_client_init(struct mei_device *dev)
 {
-       dev->dev_state = MEI_DEV_ENABLED;
+       mei_set_devstate(dev, MEI_DEV_ENABLED);
        dev->reset_count = 0;
 
        schedule_work(&dev->bus_rescan_work);
index 64e318f589b42fbf41ad738cfa44dfc752caa5aa..c1f9e810cf8138bf924fdcd0bda5b03b36a415ef 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #ifndef _MEI_CLIENT_H_
index 7b5df8fd6c5ad84dff00d5f580af0f0b6d7daca2..0970142bcace1da555e4dd00b26885d3b2eae952 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2012-2016, Intel Corporation. All rights reserved
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2012-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
+
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
index 795641b8218102ab1a7d8fa026f20cc8c7cacc8f..ef56f849b251d77b87767d489eefa314c8cc4768 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
  */
 #include <linux/dma-mapping.h>
 #include <linux/mei.h>
index e6207f6148163e01cbcc2c5fbd1a0e31a38350a9..a44094cdbc36c307406a829586e9708b1fb3a124 100644 (file)
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
-
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
index 0171a7e79babab0c5157f46c97c378dbd0d2c2f2..5aa58cffdd2e5bc7b3ce2b80c7b6e8c33c0ab95f 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #ifndef _MEI_HBM_H_
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
new file mode 100644 (file)
index 0000000..95b2d6d
--- /dev/null
@@ -0,0 +1,13 @@
+
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_HDCP
+       tristate "Intel HDCP2.2 services of ME Interface"
+       select INTEL_MEI_ME
+       depends on DRM_I915
+       help
+         MEI Support for HDCP2.2 Services on Intel platforms.
+
+         Enables the ME FW services required for HDCP2.2 support through
+         I915 display driver of Intel.
index adbe7506282d950c7d0e3f9f1e6c0ac9297c9ce2..3fbb56485ce8fc2dd45dbe9bbd3cdf17551c0855 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Intel Corporation. All rights reserved.
 #
 # Makefile - HDCP client driver for Intel MEI Bus Driver.
 
index 90b6ae8e9dae14c4e1d31633bbd4f5c7cff80d8d..b07000202d4a1ad093b67523422078cce34a142f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0)
+// SPDX-License-Identifier: GPL-2.0
 /*
 * Copyright © 2019 Intel Corporation
  *
index 5f74b908e486ba52b0b1c4e52ea6c973713e321b..e4b1cd54c853d520078bd78aeb5491cfd99e1858 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+) */
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Copyright © 2019 Intel Corporation
  *
index bb1ee9834a029d28fdd690ed660f6f6355c89b67..d74b182e19f3889d6bc03061c7aa4bc159896773 100644 (file)
@@ -1,68 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *     Intel Corporation.
- *     linux-mei@linux.intel.com
- *     http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
 #ifndef _MEI_HW_MEI_REGS_H_
 #define _MEI_HW_MEI_REGS_H_
 
index 3fbbadfa2ae1524f20db3837398de7cebdcf00f7..de21e30835269e77803f65f9d0c3b84a3f1b96ad 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/pci.h>
index bbcc5fc106cdf46418acd2dcd4adfe6f0c1f5383..08c84a0de4a8dbca3a097a4f2691002eda43d1f5 100644 (file)
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
-
-
 #ifndef _MEI_INTERFACE_H_
 #define _MEI_INTERFACE_H_
 
index f19229c4e65586dcb39697e928afa48e7731c0ec..a92b306dac8b6ccfe031cf81d81935edba09535a 100644 (file)
@@ -1,63 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING
- *
- * Contact Information:
- *     Intel Corporation.
- *     linux-mei@linux.intel.com
- *     http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
 #ifndef _MEI_HW_TXE_REGS_H_
 #define _MEI_HW_TXE_REGS_H_
 
index 8449fe0367ff4d65b7b06905490dc2ab3090ac4e..5e58656b8e197947d75367c42ebf1f8d15322463 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/pci.h>
index e1e8b66d764881f15c0e348f5429900e2d30a0d7..96511b04bf88959e0d92b78ad2f21a9ae21c0966 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #ifndef _MEI_HW_TXE_H_
index b7d2487b84094656641e42260ea8363d7aaf0cbd..d025a5f8317e080b7880952384b3d360f1776ab6 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #ifndef _MEI_HW_TYPES_H_
index eb026e2a0537a07a180575843c37a6c9c4baeebd..b9fef773e71b53c318f17b5cbcda377831a577d2 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/export.h>
@@ -133,12 +123,12 @@ int mei_reset(struct mei_device *dev)
 
        /* enter reset flow */
        interrupts_enabled = state != MEI_DEV_POWER_DOWN;
-       dev->dev_state = MEI_DEV_RESETTING;
+       mei_set_devstate(dev, MEI_DEV_RESETTING);
 
        dev->reset_count++;
        if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
                dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
-               dev->dev_state = MEI_DEV_DISABLED;
+               mei_set_devstate(dev, MEI_DEV_DISABLED);
                return -ENODEV;
        }
 
@@ -160,7 +150,7 @@ int mei_reset(struct mei_device *dev)
 
        if (state == MEI_DEV_POWER_DOWN) {
                dev_dbg(dev->dev, "powering down: end of reset\n");
-               dev->dev_state = MEI_DEV_DISABLED;
+               mei_set_devstate(dev, MEI_DEV_DISABLED);
                return 0;
        }
 
@@ -172,11 +162,11 @@ int mei_reset(struct mei_device *dev)
 
        dev_dbg(dev->dev, "link is established start sending messages.\n");
 
-       dev->dev_state = MEI_DEV_INIT_CLIENTS;
+       mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
        ret = mei_hbm_start_req(dev);
        if (ret) {
                dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
-               dev->dev_state = MEI_DEV_RESETTING;
+               mei_set_devstate(dev, MEI_DEV_RESETTING);
                return ret;
        }
 
@@ -206,7 +196,7 @@ int mei_start(struct mei_device *dev)
 
        dev->reset_count = 0;
        do {
-               dev->dev_state = MEI_DEV_INITIALIZING;
+               mei_set_devstate(dev, MEI_DEV_INITIALIZING);
                ret = mei_reset(dev);
 
                if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
@@ -241,7 +231,7 @@ int mei_start(struct mei_device *dev)
        return 0;
 err:
        dev_err(dev->dev, "link layer initialization failed.\n");
-       dev->dev_state = MEI_DEV_DISABLED;
+       mei_set_devstate(dev, MEI_DEV_DISABLED);
        mutex_unlock(&dev->device_lock);
        return -ENODEV;
 }
@@ -260,7 +250,7 @@ int mei_restart(struct mei_device *dev)
 
        mutex_lock(&dev->device_lock);
 
-       dev->dev_state = MEI_DEV_POWER_UP;
+       mei_set_devstate(dev, MEI_DEV_POWER_UP);
        dev->reset_count = 0;
 
        err = mei_reset(dev);
@@ -311,7 +301,7 @@ void mei_stop(struct mei_device *dev)
        dev_dbg(dev->dev, "stopping the device.\n");
 
        mutex_lock(&dev->device_lock);
-       dev->dev_state = MEI_DEV_POWER_DOWN;
+       mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
        mutex_unlock(&dev->device_lock);
        mei_cl_bus_remove_devices(dev);
 
@@ -324,7 +314,7 @@ void mei_stop(struct mei_device *dev)
 
        mei_reset(dev);
        /* move device to disabled state unconditionally */
-       dev->dev_state = MEI_DEV_DISABLED;
+       mei_set_devstate(dev, MEI_DEV_DISABLED);
 
        mutex_unlock(&dev->device_lock);
 }
index 055c2d89b310819521bd83a68b2de30f7524da7d..c70a8c74cc57a1a04beb893ae69730e0e3b6f3ba 100644 (file)
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
-
 #include <linux/export.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
index 87281b3695e607c023ab1fd8a0bc6a5d29bf971b..ad02097d7fee564523ac364e4798d200a7aeb45e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include "mei_dev.h"
 #include "client.h"
 
+static struct class *mei_class;
+static dev_t mei_devt;
+#define MEI_MAX_DEVS  MINORMASK
+static DEFINE_MUTEX(mei_minor_lock);
+static DEFINE_IDR(mei_idr);
+
 /**
  * mei_open - the open function
  *
@@ -838,12 +835,65 @@ static ssize_t fw_ver_show(struct device *device,
 }
 static DEVICE_ATTR_RO(fw_ver);
 
+/**
+ * dev_state_show - display device state
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf:  char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t dev_state_show(struct device *device,
+                             struct device_attribute *attr, char *buf)
+{
+       struct mei_device *dev = dev_get_drvdata(device);
+       enum mei_dev_state dev_state;
+
+       mutex_lock(&dev->device_lock);
+       dev_state = dev->dev_state;
+       mutex_unlock(&dev->device_lock);
+
+       return sprintf(buf, "%s", mei_dev_state_str(dev_state));
+}
+static DEVICE_ATTR_RO(dev_state);
+
+static int match_devt(struct device *dev, const void *data)
+{
+       const dev_t *devt = data;
+
+       return dev->devt == *devt;
+}
+
+/**
+ * dev_set_devstate: set to new device state and notify sysfs file.
+ *
+ * @dev: mei_device
+ * @state: new device state
+ */
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
+{
+       struct device *clsdev;
+
+       if (dev->dev_state == state)
+               return;
+
+       dev->dev_state = state;
+
+       clsdev = class_find_device(mei_class, NULL, &dev->cdev.dev, match_devt);
+       if (clsdev) {
+               sysfs_notify(&clsdev->kobj, NULL, "dev_state");
+               put_device(clsdev);
+       }
+}
+
 static struct attribute *mei_attrs[] = {
        &dev_attr_fw_status.attr,
        &dev_attr_hbm_ver.attr,
        &dev_attr_hbm_ver_drv.attr,
        &dev_attr_tx_queue_limit.attr,
        &dev_attr_fw_ver.attr,
+       &dev_attr_dev_state.attr,
        NULL
 };
 ATTRIBUTE_GROUPS(mei);
@@ -867,12 +917,6 @@ static const struct file_operations mei_fops = {
        .llseek = no_llseek
 };
 
-static struct class *mei_class;
-static dev_t mei_devt;
-#define MEI_MAX_DEVS  MINORMASK
-static DEFINE_MUTEX(mei_minor_lock);
-static DEFINE_IDR(mei_idr);
-
 /**
  * mei_minor_get - obtain next free device minor number
  *
index 374edde72a1428d0cd8a1cbf477c748c6d12823a..48d4c4fcefd217e9db2263c2b714762383d0ef33 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 #include <linux/module.h>
 
index b52e9b97a7c07690cb36c6dbd09324c7684b4303..df758033dc937c81ea68dcadf23c32bdc83d2ce8 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
index 685b78ce30a54c0c3a75d7d4c98af3e1536ee250..fca832fcac57f0eb0122fecc4cc65dcc2eff133b 100644 (file)
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #ifndef _MEI_DEV_H_
@@ -535,7 +525,6 @@ struct mei_device {
        struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 
-
        const struct mei_hw_ops *ops;
        char hw[0] __aligned(sizeof(void *));
 };
@@ -594,6 +583,8 @@ int mei_restart(struct mei_device *dev);
 void mei_stop(struct mei_device *dev);
 void mei_cancel_work(struct mei_device *dev);
 
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state);
+
 int mei_dmam_ring_alloc(struct mei_device *dev);
 void mei_dmam_ring_free(struct mei_device *dev);
 bool mei_dma_ring_is_allocated(struct mei_device *dev);
index 3ab946ad32570ce645d18950d8be8d8582f0484a..7a2b3545a7f9c2fe9fa75ec8652c442a9fcb7919 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
index e1b909123fb02d25539c8645cc51e4f4960035ff..2e37fc2e0fa8087f29910e18ca82bb03efcdc06e 100644 (file)
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/module.h>
index 9e443df44b3b4ee3901a5f857796246bac754e49..0c6de97dd347edf21935543f32d776cf3b23bf4c 100644 (file)
@@ -572,6 +572,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                xpc_wakeup_channel_mgr(part);
        }
+               /* fall through */
        case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
                spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
                part_uv->flags |= XPC_P_ENGAGED_UV;
index 82a97866e0cf4c857cbb25c5487a3c6f80addfa0..7c8f203f9a24d38bbd1c2b870644e511a1db76d3 100644 (file)
@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
        struct mmc_command *cmd;
        struct mmc_data *data;
        unsigned int dma_on:1;
-       unsigned int early_data:1;
 
        struct mutex cmd_mutex;
 
@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
        host->sg_count--;
 }
 
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
-                                       bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
 {
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = host->data;
@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
                ctrl |= AU6601_DATA_WRITE;
 
        if (data->host_cookie == COOKIE_MAPPED) {
-               if (host->early_data) {
-                       host->early_data = false;
-                       return;
-               }
-
-               host->early_data = early;
-
                alcor_data_set_dma(host);
                ctrl |= AU6601_DATA_DMA_MODE;
                host->dma_on = 1;
@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
                               struct mmc_command *cmd)
 {
+       struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = cmd->data;
 
        if (!data)
@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
        if (data->host_cookie != COOKIE_MAPPED)
                alcor_prepare_sg_miter(host);
 
-       alcor_trigger_data_transfer(host, true);
+       alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
 }
 
 static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                return false;
 
-       alcor_trigger_data_transfer(host, false);
+       alcor_trigger_data_transfer(host);
        host->cmd = NULL;
        return true;
 }
@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                alcor_request_complete(host, 1);
        else
-               alcor_trigger_data_transfer(host, false);
+               alcor_trigger_data_transfer(host);
        host->cmd = NULL;
 }
 
@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        case AU6601_INT_READ_BUF_RDY:
                alcor_trf_block_pio(host, true);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_WRITE_BUF_RDY:
                alcor_trf_block_pio(host, false);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_DMA_END:
                if (!host->sg_count)
@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        }
 
-       if (intmask & AU6601_INT_DATA_END)
-               return 0;
+       if (intmask & AU6601_INT_DATA_END) {
+               if (!host->dma_on && host->blocks) {
+                       alcor_trigger_data_transfer(host);
+                       return 1;
+               } else {
+                       return 0;
+               }
+       }
 
        return 1;
 }
index 5bbed477c9b1ee6546f066e55fd9946ed4a160b6..9f20fff9781b0791ea36c1bd3cd657dd1b50e8c6 100644 (file)
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
        sdhci_reset(host, mask);
 }
 
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+                     SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+       if (omap_host->is_tuning && host->cmd && !host->data_early &&
+           (intmask & CMD_ERR_MASK)) {
+
+               /*
+                * Since we are not resetting data lines during tuning
+                * operation, data error or data complete interrupts
+                * might still arrive. Mark this request as a failure
+                * but still wait for the data interrupt
+                */
+               if (intmask & SDHCI_INT_TIMEOUT)
+                       host->cmd->error = -ETIMEDOUT;
+               else
+                       host->cmd->error = -EILSEQ;
+
+               host->cmd = NULL;
+
+               /*
+                * Sometimes command error interrupts and command complete
+                * interrupt will arrive together. Clear all command related
+                * interrupts here.
+                */
+               sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+               intmask &= ~CMD_MASK;
+       }
+
+       return intmask;
+}
+
 static struct sdhci_ops sdhci_omap_ops = {
        .set_clock = sdhci_omap_set_clock,
        .set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
        .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
        .reset = sdhci_omap_reset,
        .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+       .irq = sdhci_omap_irq,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
index 72428b6bfc474ba6d757b94d79ff62804cc7c8ec..7b7286b4d81ef660d22a9ca93e5f0f2870ea5666 100644 (file)
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                        continue;
                }
 
-               if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+               /*
+                * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
+                * the failure due to scheduling.
+                */
+               if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
                        break;
 
                if (chip_good(map, adr, datum)) {
index b59708c35fafe87a926f4b47c81d89ba56f695e4..ee610721098e628d01ad988b1a9cad40aeef6f70 100644 (file)
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
+               int ret;
+
                netdev_dbg(event_dev, "IFF_MASTER\n");
-               return bond_master_netdev_event(event, event_dev);
+               ret = bond_master_netdev_event(event, event_dev);
+               if (ret != NOTIFY_DONE)
+                       return ret;
        }
 
        if (event_dev->flags & IFF_SLAVE) {
index 2f120b2ffef0cfd7d97f6a901f9552fcc58288df..4985268e227330045e1cc0a6f3dadcb81e4c2830 100644 (file)
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-       return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+       return sprintf(buf, "%*phC\n",
+                      slave->dev->addr_len,
+                      slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
 
index dce84a2a65c71eeec36d10fa9ceb6df0a487866a..c44b2822e4dd064e2cba3a9ca1b0af457893e47e 100644 (file)
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                return 0;
 
        lane = mv88e6390x_serdes_get_lane(chip, port);
-       if (lane < 0)
+       if (lane < 0 && lane != -ENODEV)
                return lane;
 
-       if (chip->ports[port].serdes_irq) {
-               err = mv88e6390_serdes_irq_disable(chip, port, lane);
+       if (lane >= 0) {
+               if (chip->ports[port].serdes_irq) {
+                       err = mv88e6390_serdes_irq_disable(chip, port, lane);
+                       if (err)
+                               return err;
+               }
+
+               err = mv88e6390x_serdes_power(chip, port, false);
                if (err)
                        return err;
        }
 
-       err = mv88e6390x_serdes_power(chip, port, false);
-       if (err)
-               return err;
+       chip->ports[port].cmode = 0;
 
        if (cmode) {
                err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                if (err)
                        return err;
 
+               chip->ports[port].cmode = cmode;
+
+               lane = mv88e6390x_serdes_get_lane(chip, port);
+               if (lane < 0)
+                       return lane;
+
                err = mv88e6390x_serdes_power(chip, port, true);
                if (err)
                        return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                }
        }
 
-       chip->ports[port].cmode = cmode;
-
        return 0;
 }
 
index a9bdc21873d32f31620ac169f8aff5b76cd02f7f..10ff37d6dc783b796c690a4d73bc90caa4cad931 100644 (file)
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
        bnx2x_sample_bulletin(bp);
 
        if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
-               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
                rc = -EINVAL;
                goto out;
        }
index 0bb9d7b3a2b622211a2d401fd81dd70f6cff4ea0..4c586ba4364bab671d011877bde4fee1a2c30083 100644 (file)
@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        tpa_info = &rxr->rx_tpa[agg_id];
 
        if (unlikely(cons != rxr->rx_next_cons)) {
+               netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return;
        }
@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        cons = rxcmp->rx_cmp_opaque;
-       rx_buf = &rxr->rx_buf_ring[cons];
-       data = rx_buf->data;
-       data_ptr = rx_buf->data_ptr;
        if (unlikely(cons != rxr->rx_next_cons)) {
                int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
 
+               netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return rc1;
        }
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data;
+       data_ptr = rx_buf->data_ptr;
        prefetch(data_ptr);
 
        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+               u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
                bnxt_reuse_rx_data(rxr, cons, data);
                if (agg_bufs)
                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
 
                rc = -EIO;
+               if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+                       netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+                       bnxt_sched_reset(bp, rxr);
+               }
                goto next_rx;
        }
 
index 328373e0578ff83bf5d5bb336103deba33144e99..060a6f386104ac5511a381a9ef308f956fc6f551 100644 (file)
@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
        pci_set_power_state(tp->pdev, PCI_D3hot);
 }
 
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
 {
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
        bool current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
-       u16 current_speed;
+       u32 current_speed;
        u8 current_duplex;
        int i, err;
 
@@ -5719,7 +5719,7 @@ out:
 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
 {
        u32 orig_pause_cfg;
-       u16 orig_active_speed;
+       u32 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        bool current_link_up;
@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 {
        int err = 0;
        u32 bmsr, bmcr;
-       u16 current_speed = SPEED_UNKNOWN;
+       u32 current_speed = SPEED_UNKNOWN;
        u8 current_duplex = DUPLEX_UNKNOWN;
        bool current_link_up = false;
        u32 local_adv, remote_adv, sgsr;
index a772a33b685c5eb8c28137107eb33cb4b6ffeb1d..6953d0546acb320196887a51d07312977d4f49ed 100644 (file)
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
 struct tg3_link_config {
        /* Describes what we're trying to get. */
        u32                             advertising;
-       u16                             speed;
+       u32                             speed;
        u8                              duplex;
        u8                              autoneg;
        u8                              flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
        u8                              active_flowctrl;
 
        u8                              active_duplex;
-       u16                             active_speed;
+       u32                             active_speed;
        u32                             rmt_adv;
 };
 
index 1522aee81884bdf702b32e1cd8cbae4316e988b5..3da2795e248638abbc8d50a194dd8061325aad41 100644 (file)
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 
                        /* First, update TX stats if needed */
                        if (skb) {
-                               if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+                               if (unlikely(skb_shinfo(skb)->tx_flags &
+                                            SKBTX_HW_TSTAMP) &&
+                                   gem_ptp_do_txstamp(queue, skb, desc) == 0) {
                                        /* skb now belongs to timestamp buffer
                                         * and will be removed later
                                         */
index aa2be480719134f720e9487a3c71b4272cc8efe3..c032bef1b776d74ea4886e8fbddca40c8b7dd868 100644 (file)
 #define DRV_NAME       "nicvf"
 #define DRV_VERSION    "1.0"
 
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU    (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};
 
-       cancel_delayed_work_sync(&nic->link_change_work);
-
        /* wait till all queued set_rx_mode tasks completes */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq) {
+               cancel_delayed_work_sync(&nic->link_change_work);
+               drain_workqueue(nic->nicvf_rx_mode_wq);
+       }
 
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
 
        /* wait till all queued set_rx_mode tasks completes if any */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq)
+               drain_workqueue(nic->nicvf_rx_mode_wq);
 
        netif_carrier_off(netdev);
 
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
        /* Send VF config done msg to PF */
        nicvf_send_cfg_done(nic);
 
-       INIT_DELAYED_WORK(&nic->link_change_work,
-                         nicvf_link_status_check_task);
-       queue_delayed_work(nic->nicvf_rx_mode_wq,
-                          &nic->link_change_work, 0);
+       if (nic->nicvf_rx_mode_wq) {
+               INIT_DELAYED_WORK(&nic->link_change_work,
+                                 nicvf_link_status_check_task);
+               queue_delayed_work(nic->nicvf_rx_mode_wq,
+                                  &nic->link_change_work, 0);
+       }
 
        return 0;
 cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;
 
+       /* For now just support only the usual MTU sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+               netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+                           netdev->mtu);
+               return -EINVAL;
+       }
+
        netdev->mtu = new_mtu;
 
        if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        bool bpf_attached = false;
        int ret = 0;
 
-       /* For now just support only the usual MTU sized frames */
-       if (prog && (dev->mtu > 1500)) {
+       /* For now just support only the usual MTU sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (prog && dev->mtu > MAX_XDP_MTU) {
                netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
                            dev->mtu);
                return -EOPNOTSUPP;
index 5b4d3badcb730b1417739d508bae8b3838afaaf9..e246f9733bb89161ceb2c6d39fcbe330b469ca61 100644 (file)
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
        /* Check if page can be recycled */
        if (page) {
                ref_count = page_ref_count(page);
-               /* Check if this page has been used once i.e 'put_page'
-                * called after packet transmission i.e internal ref_count
-                * and page's ref_count are equal i.e page can be recycled.
+               /* This page can be recycled if internal ref_count and page's
+                * ref_count are equal, indicating that the page has been used
+                * once for packet transmission. For non-XDP mode, internal
+                * ref_count is always '1'.
                 */
-               if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
-                       pgcache->ref_count--;
-               else
-                       page = NULL;
-
-               /* In non-XDP mode, page's ref_count needs to be '1' for it
-                * to be recycled.
-                */
-               if (!rbdr->is_xdp && (ref_count != 1))
+               if (rbdr->is_xdp) {
+                       if (ref_count == pgcache->ref_count)
+                               pgcache->ref_count--;
+                       else
+                               page = NULL;
+               } else if (ref_count != 1) {
                        page = NULL;
+               }
        }
 
        if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        while (head < rbdr->pgcnt) {
                pgcache = &rbdr->pgcache[head];
                if (pgcache->page && page_ref_count(pgcache->page) != 0) {
-                       if (!rbdr->is_xdp) {
-                               put_page(pgcache->page);
-                               continue;
+                       if (rbdr->is_xdp) {
+                               page_ref_sub(pgcache->page,
+                                            pgcache->ref_count - 1);
                        }
-                       page_ref_sub(pgcache->page, pgcache->ref_count - 1);
                        put_page(pgcache->page);
                }
                head++;
index 74849be5f004f59552892cf642a9b02efb393ac7..e2919005ead3e1592999b140841b0234344e87b7 100644 (file)
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
                ppmax = max;
 
        /* pool size must be multiple of unsigned long */
-       bmap = BITS_TO_LONGS(ppmax);
+       bmap = ppmax / BITS_PER_TYPE(unsigned long);
+       if (!bmap)
+               return NULL;
+
        ppmax = (bmap * sizeof(unsigned long)) << 3;
 
        alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
        if (reserve_factor) {
                ppmax_pool = ppmax / reserve_factor;
                pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+               if (!pool) {
+                       ppmax_pool = 0;
+                       reserve_factor = 0;
+               }
 
                pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
                         ndev->name, ppmax, ppmax_pool, pool_index_max);
index 697c2427f2b70c06c87dd00ae23d4e0b06d4fc3d..a96ad20ee4843e9cdd02c55a3e7286a51679efea 100644 (file)
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        int ret;
 
        if (enable) {
-               ret = clk_prepare_enable(fep->clk_ahb);
-               if (ret)
-                       return ret;
-
                ret = clk_prepare_enable(fep->clk_enet_out);
                if (ret)
-                       goto failed_clk_enet_out;
+                       return ret;
 
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 
                phy_reset_after_clk_enable(ndev->phydev);
        } else {
-               clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
 failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-               clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
 }
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
        ret = clk_prepare_enable(fep->clk_ipg);
        if (ret)
                goto failed_clk_ipg;
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk_ahb;
 
        fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+       clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
 
        return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
 
-       return clk_prepare_enable(fep->clk_ipg);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               return ret;
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       return 0;
+
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+       return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
index 79d03f8ee7b180d2cab9a2a647254461c0a0cb08..c7fa97a7e1f4d4b07dd6b00f7c5c7bffca4a0356 100644 (file)
@@ -150,7 +150,6 @@ out_buffer_fail:
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-       hnae_free_buffers(ring);
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+       if (is_rx_ring(ring))
+               hnae_free_buffers(ring);
+
        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
index 08a750fb60c49d397c61845130e153fe1e3b0b3e..d6fb8343723041992d12cfa505f222822bd603fd 100644 (file)
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };
 
 struct hnae_queue {
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        phys_addr_t phy_base;
        struct hnae_ae_dev *dev;        /* the device who use this queue */
        struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
index a97228c93831d69fe2211317486f14a25197e740..6c0507921623b843bb2010321782ace35a24b8a3 100644 (file)
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
                              struct hns_mac_cb *mac_cb)
 {
-       param->vaddr = (void *)mac_cb->vaddr;
+       param->vaddr = mac_cb->vaddr;
        param->mac_mode = hns_get_enet_interface(mac_cb);
        ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
        param->mac_id = mac_cb->mac_id;
index fbc75341bef760b82a1d7a10469d5649db91e366..22589799f1a575127f77f733e9e21a28889efa46 100644 (file)
@@ -187,7 +187,7 @@ struct mac_statistics {
 /*mac para struct ,mac get param from nic or dsaf when initialize*/
 struct mac_params {
        char addr[ETH_ALEN];
-       void *vaddr; /*virtual address*/
+       u8 __iomem *vaddr; /*virtual address*/
        struct device *dev;
        u8 mac_id;
        /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
        enum mac_mode mac_mode;
        u8 mac_id;
        struct hns_mac_cb *mac_cb;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
        unsigned int virt_dev_num;
        struct device *dev;
index ac55db065f167ad58f9ec41966afa7b0299b5f40..61eea6ac846fcfce9d254351221b998dd20a6e47 100644 (file)
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
                       DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-       mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }
 
 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
        /* default config dvc to 0 */
        mac_data.tbl_ucast_dvc = 0;
        mac_data.tbl_ucast_out_port = mac_entry->port_num;
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
 
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                                     0xff,
                                     mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                dsaf_dev->ae_dev.name, mac_key.high.val,
                mac_key.low.val, entry_index);
 
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        /* config mc entry with mask */
        hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                /* config key mask */
                hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                soft_mac_entry += entry_index;
                soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
        } else { /* not zero, just del port, update */
-               tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-               tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+               tcam_data.tbl_tcam_data_high = mac_key.high.val;
+               tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
                hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
                                     &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+       if (port < DSAF_SERVICE_NW_NUM)
+               return port;
+
+       if (port >= DSAF_BASE_INNER_PORT_NUM)
+               return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+       return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
        struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
        memset(&temp_key, 0x0, sizeof(temp_key));
        mask_entry.addr[0] = 0x01;
        hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-                            port, mask_entry.addr);
+                            0xf, mask_entry.addr);
        tbl_tcam_mcast.tbl_mcast_item_vld = 1;
        tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       if (port < DSAF_SERVICE_NW_NUM) {
-               mskid = port;
-       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-       } else {
+       /* set MAC port to handle multicast */
+       mskid = hns_dsaf_get_port_id(port);
+       if (mskid == -EINVAL) {
                dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
                        dsaf_dev->ae_dev.name, port,
                        mask_key.high.val, mask_key.low.val);
                return;
        }
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
 
+       /* set pool bit map to handle multicast */
+       mskid = hns_dsaf_get_port_id(port_num);
+       if (mskid == -EINVAL) {
+               dev_err(dsaf_dev->dev,
+                       "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port_num,
+                       mask_key.high.val, mask_key.low.val);
+               return;
+       }
        dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
                     mskid % 32, 1);
+
        memcpy(&temp_key, &mask_key, sizeof(mask_key));
        hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
                                   (struct dsaf_tbl_tcam_data *)(&mask_key),
index 0e1cd99831a6083faa790aa80be1f6c635b15a50..76cc8887e1a83599c9178e88787ecb6c6a2b8784 100644 (file)
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
index 16294cd3c95459891c65080cd49c61f839afaa60..19b94879691f86e0ec969e40ed5a057cb24c3d7b 100644 (file)
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
                dsaf_set_field(origin, 1ull << 10, 10, en);
                dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
        } else {
-               u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+               u8 __iomem *base_addr = mac_cb->serdes_vaddr +
                                (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
                dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
        }
index 3d07c8a7639dad46c5b810b19f56ba84fdfc655a..17c019106e6e40d8f281deb87b1928367582e371 100644 (file)
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
        }
 }
 
-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
        return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
        dsaf_dev->ppe_common[comm_index] = NULL;
 }
 
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-                                       int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                     int ppe_idx)
 {
        return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
index f670e63a5a018cd5b48b4a62093c104905aa4463..110c6e8222c7038a6eb24c1854490845293e320d 100644 (file)
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
        struct hns_ppe_hw_stats hw_stats;
 
        u8 index;       /* index in a ppe common device */
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        int virq;
        u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
        u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
        struct device *dev;
        struct dsaf_device *dsaf_dev;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
 
        enum ppe_common_mode ppe_mode;
 
index 6bf346c11b25a5c87bceca81abf1bfc83188d768..ac3518ca4d7bec5b3737cef78081202dfcf1f53d 100644 (file)
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
                mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
        } else {
                ring = &q->tx_ring;
-               ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+               ring->io_base = ring_pair_cb->q.io_base +
                        HNS_RCB_TX_REG_OFFSET;
                irq_idx = HNS_RCB_IRQ_IDX_TX;
                mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
        }
 }
 
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
index b9733b0b848263bc9a25ccf42f3d8b433a7b9e5e..b9e7f11f08968099c76a99c3f119e77a909c72ea 100644 (file)
 #define XGMAC_PAUSE_CTL_RSP_MODE_B     2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B      3
 
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
        writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
        dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                      u32 shift, u32 val)
 {
        u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
        dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                     u32 shift)
 {
        u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
        dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
 
 #define dsaf_write_b(addr, data)\
-       writeb((data), (__iomem unsigned char *)(addr))
+       writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-       readb((__iomem unsigned char *)(addr))
+       readb((__iomem u8 *)(addr))
 
 #define hns_mac_reg_read64(drv, offset) \
-       readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+       readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
 
 #endif /* _DSAF_REG_H */
index ba4316910dea1726da855c13b78e95bb6bd36a3c..a60f207768fc7152edbbf9a5394afaa080260d8b 100644 (file)
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
        dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
        dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
        dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-       dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+       dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
index 60e7d7ae3787c280d9b21319a1493ccc5d570de8..4cd86ba1f050dcf786238c147cd2fc3b799797bc 100644 (file)
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-       ring->stats.tx_pkts++;
-       ring->stats.tx_bytes += skb->len;
 
        return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                /* issue prefetch for next Tx descriptor */
                prefetch(&ring->desc_cb[ring->next_to_clean]);
        }
+       /* update tx ring statistics. */
+       ring->stats.tx_pkts += pkts;
+       ring->stats.tx_bytes += bytes;
 
        NETIF_TX_UNLOCK(ring);
 
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_tx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_rx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
 
index fffe8c1c45d394b2a0723443582d6427ffa6e387..0fb61d440d3bb96a1d5b24b4c5380b3b0c33de4e 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o  hclge_debugfs.o
index fb93bbd358455a735880d6e83cd314c0773a636a..6193f8fa7cf34aa142f575ca903f42d666847f31 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
index baf5cc251f3299499f3fc03ee17513aa740110f6..8b8a7d00e8e0c92d23a9ca67683ce98a0417fddc 100644 (file)
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };
 
 struct hns_mdio_device {
-       void *vbase;            /* mdio reg base address */
+       u8 __iomem *vbase;              /* mdio reg base address */
        struct regmap *subctrl_vbase;
        struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST         0x531C
 #define MDIO_SC_RESET_ST       0x5A1C
 
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       writel_relaxed(value, reg_addr + reg);
+       writel_relaxed(value, base + reg);
 }
 
 #define MDIO_WRITE_REG(a, reg, value) \
        mdio_write_reg((a)->vbase, (reg), (value))
 
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       return readl_relaxed(reg_addr + reg);
+       return readl_relaxed(base + reg);
 }
 
 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
 
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
                               u32 val)
 {
        u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
        mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
 
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
        u32 origin;
 
index 5ecbb1adcf3b9d45fa756af5682245933aa06eb3..3dfb2d131eb76f29c6129e8f29f477ab7be840db 100644 (file)
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
         */
        adapter->state = VNIC_PROBED;
 
+       reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
@@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+       netdev_features_t old_hw_features = 0;
        union ibmvnic_crq crq;
        int i;
 
@@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
        adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
        adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
 
-       adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+       if (adapter->state != VNIC_PROBING) {
+               old_hw_features = adapter->netdev->hw_features;
+               adapter->netdev->hw_features = 0;
+       }
+
+       adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
 
        if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
-               adapter->netdev->features |= NETIF_F_IP_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
 
        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
-               adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
 
        if ((adapter->netdev->features &
            (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
-               adapter->netdev->features |= NETIF_F_RXCSUM;
+               adapter->netdev->hw_features |= NETIF_F_RXCSUM;
 
        if (buf->large_tx_ipv4)
-               adapter->netdev->features |= NETIF_F_TSO;
+               adapter->netdev->hw_features |= NETIF_F_TSO;
        if (buf->large_tx_ipv6)
-               adapter->netdev->features |= NETIF_F_TSO6;
+               adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+       if (adapter->state == VNIC_PROBING) {
+               adapter->netdev->features |= adapter->netdev->hw_features;
+       } else if (old_hw_features != adapter->netdev->hw_features) {
+               netdev_features_t tmp = 0;
 
-       adapter->netdev->hw_features |= adapter->netdev->features;
+               /* disable features no longer supported */
+               adapter->netdev->features &= adapter->netdev->hw_features;
+               /* turn on features now supported if previously enabled */
+               tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+                       adapter->netdev->hw_features;
+               adapter->netdev->features |=
+                               tmp & adapter->netdev->wanted_features;
+       }
 
        memset(&crq, 0, sizeof(crq));
        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
@@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
 
-       init_completion(&adapter->init_done);
+       reinit_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 
        adapter->from_passive_init = false;
 
-       init_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       init_completion(&adapter->init_done);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
index 5a0419421511fd7a9a3e44a1a7fa2c4e21f26217..ecef949f3baae022d46082f4a1dc32d18cb082ec 100644 (file)
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
        /* create driver workqueue */
        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
                                          fm10k_driver_name);
+       if (!fm10k_workqueue)
+               return -ENOMEM;
 
        fm10k_dbg_init();
 
index d684998ba2b03b27230916afb7012951ad987a94..d3cc3427caad187ecf5bf967404f86182082790a 100644 (file)
@@ -790,6 +790,8 @@ struct i40e_vsi {
 
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
+
+       unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
        return !!vsi->xdp_prog;
 }
 
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
-       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
-       int qid = ring->queue_index;
-
-       if (ring_is_xdp(ring))
-               qid -= ring->vsi->alloc_queue_pairs;
-
-       if (!xdp_on)
-               return NULL;
-
-       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
index 4c885801fa2699432d0eaaaca8c4487081ee36b0..7874d0ec7fb0e1a5dc4c42aeed03d7342e8f4fef 100644 (file)
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
-       if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
-                         | (wol->wolopts != WAKE_FILTER))
+       if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;
 
        /* is this a new value? */
index da62218eb70ad3f4c95111c3c67f3a4dd541aa50..b1c265012c8ad03d65a6eb56e1a0d5e42371c6be 100644 (file)
@@ -3063,6 +3063,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
                            ring->queue_index);
 }
 
+/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+       int qid = ring->queue_index;
+
+       if (ring_is_xdp(ring))
+               qid -= ring->vsi->alloc_queue_pairs;
+
+       if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+               return NULL;
+
+       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
 /**
  * i40e_configure_tx_ring - Configure a transmit ring context and rest
  * @ring: The Tx ring to configure
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        hash_init(vsi->mac_filter_hash);
        vsi->irqs_ready = false;
 
+       if (type == I40E_VSI_MAIN) {
+               vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+               if (!vsi->af_xdp_zc_qps)
+                       goto err_rings;
+       }
+
        ret = i40e_set_num_rings_in_vsi(vsi);
        if (ret)
                goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        goto unlock_pf;
 
 err_rings:
+       bitmap_free(vsi->af_xdp_zc_qps);
        pf->next_vsi = i - 1;
        kfree(vsi);
 unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       bitmap_free(vsi->af_xdp_zc_qps);
        i40e_vsi_free_arrays(vsi, true);
        i40e_clear_rss_config_user(vsi);
 
index 5fb4353c742b9038d3ac7f867163d35cd3c3142d..31575c0bb884f2b0520b848d29420dc98438eba4 100644 (file)
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now;
+       struct timespec64 now, then;
 
+       then = ns_to_timespec64(delta);
        mutex_lock(&pf->tmreg_lock);
 
        i40e_ptp_read(pf, &now, NULL);
-       timespec64_add_ns(&now, delta);
+       now = timespec64_add(now, then);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        mutex_unlock(&pf->tmreg_lock);
index b5c182e688e351eed227f867a8eaebaa6349bb07..1b17486543ac7e078a8723512943bab1c50d6d46 100644 (file)
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
 
        if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                        return err;
        }
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        i40e_xsk_umem_dma_unmap(vsi, umem);
 
        if (if_running) {
index 01fcfc6f341519c1d8a67b5c8695ec9c04842b0c..d2e2c50ce257941491e4d0a96603808310357999 100644 (file)
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
index 69b230c53fed537464ba79b0be3ede75ab13f1da..3269d8e94744f61808893267e1e305122bedd3c5 100644 (file)
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-       int retval = 0;
-#endif
+       bool wake;
 
        rtnl_lock();
        netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        igb_clear_interrupt_scheme(adapter);
        rtnl_unlock();
 
-#ifdef CONFIG_PM
-       if (!runtime) {
-               retval = pci_save_state(pdev);
-               if (retval)
-                       return retval;
-       }
-#endif
-
        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                }
 
                ctrl = rd32(E1000_CTRL);
-               /* advertise wake from D3Cold */
-               #define E1000_CTRL_ADVD3WUC 0x00100000
-               /* phy power management enable */
-               #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);
 
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                wr32(E1000_WUFC, 0);
        }
 
-       *enable_wake = wufc || adapter->en_mng_pt;
-       if (!*enable_wake)
+       wake = wufc || adapter->en_mng_pt;
+       if (!wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);
 
+       if (enable_wake)
+               *enable_wake = wake;
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-       int retval;
-       bool wake;
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       retval = __igb_shutdown(pdev, &wake, 0);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       int retval;
-       bool wake;
-
-       retval = __igb_shutdown(pdev, &wake, 1);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
index cc4907f9ff02c3faecba89c524674f33581d2346..2fb97967961c43893b2d06fef0df9bc476dda426 100644 (file)
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        struct pci_dev *pdev = adapter->pdev;
        struct device *dev = &adapter->netdev->dev;
        struct mii_bus *bus;
+       int err = -ENODEV;
 
-       adapter->mii_bus = devm_mdiobus_alloc(dev);
-       if (!adapter->mii_bus)
+       bus = devm_mdiobus_alloc(dev);
+       if (!bus)
                return -ENOMEM;
 
-       bus = adapter->mii_bus;
-
        switch (hw->device_id) {
        /* C3000 SoCs */
        case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
         */
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-       return mdiobus_register(bus);
+       err = mdiobus_register(bus);
+       if (!err) {
+               adapter->mii_bus = bus;
+               return 0;
+       }
 
 ixgbe_no_mii_bus:
        devm_mdiobus_free(dev, bus);
-       adapter->mii_bus = NULL;
-       return -ENODEV;
+       return err;
 }
 
 /**
index 71c65cc1790484b8e24e4d02ec973b830179c7de..d3eaf2ceaa3979b79a65d83beaaf072567d48c00 100644 (file)
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
                               struct mlx5e_channels *new_chs,
                               mlx5e_fp_hw_modify hw_modify);
index 122927f3a6005b2be70c694ead0a6d4a139069c6..d5e5afbdca6dcbacb776571c97eef74e033cf356 100644 (file)
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
        if (!eproto)
                return -EINVAL;
 
-       if (ext !=  MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
-               return -EOPNOTSUPP;
-
        err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
        if (err)
                return err;
index eac245a93f918c588dc8237e1af5996f3d0f73f0..4ab0d030b54486f67096a190471a747b1ac18c56 100644 (file)
@@ -122,7 +122,9 @@ out:
        return err;
 }
 
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * minimum speed value is 40Gbps
+ */
 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 {
        u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
        int err;
 
        err = mlx5e_port_linkspeed(priv->mdev, &speed);
-       if (err) {
-               mlx5_core_warn(priv->mdev, "cannot get port speed\n");
-               return 0;
-       }
+       if (err)
+               speed = SPEED_40000;
+       speed = max_t(u32, speed, SPEED_40000);
 
        xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int mtu)
+                                u32 xoff, unsigned int max_mtu)
 {
        int i;
 
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
                        return -ENOMEM;
 
                port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
-               port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
+               port_buffer->buffer[i].xon  =
+                       port_buffer->buffer[i].xoff - max_mtu;
        }
 
        return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 
 /**
  * update_buffer_lossy()
- *   mtu: device's MTU
+ *   max_mtu: netdev's max_mtu
  *   pfc_en: <input> current pfc configuration
  *   buffer: <input> current prio to buffer mapping
  *   xoff:   <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     Return 0 if no error.
  *     Set change to true if buffer configuration is modified.
  */
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
                               u8 pfc_en, u8 *buffer, u32 xoff,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
                if (err)
                        return err;
 
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
        return 0;
 }
 
+#define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 change, unsigned int mtu,
                                    struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        bool update_prio2buffer = false;
        u8 buffer[MLX5E_MAX_PRIORITY];
        bool update_buffer = false;
+       unsigned int max_mtu;
        u32 total_used = 0;
        u8 curr_pfc_en;
        int err;
        int i;
 
        mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+       max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
 
        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
-                                         &port_buffer, &update_buffer);
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+                                         xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
        }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
index 9d38e62cdf248a2f624b12227133f8132f7591bd..476dd97f7f2f25a4c0697a6ac2b34c0b5985034e 100644 (file)
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
 
 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-       int err;
+       int err = 0;
 
        rtnl_lock();
        mutex_lock(&priv->state_lock);
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto out;
+
+       err = mlx5e_safe_reopen_channels(priv);
+
+out:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
 
index fa2a3c444cdc604c308999f140a3125becd9c8d3..eec07b34b4ad07c627eebf36f87557296cc1701a 100644 (file)
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
                        return -EOPNOTSUPP;
        }
 
+       if (!(mlx5e_eswitch_rep(*out_dev) &&
+             mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+               return -EOPNOTSUPP;
+
        return 0;
 }
 
index 3078491cc0d0678a6f1867373c752ed95496a59e..1539cf3de5dc97a180d7bdcb9fe2c5ec79db93c4 100644 (file)
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 }
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir)
 {
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        mlx5_core_destroy_tir(mdev, tir->tirn);
        list_del(&tir->list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
        }
 
        INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+       mutex_init(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_tir *tir;
-       int err  = -ENOMEM;
+       int err  = 0;
        u32 tirn = 0;
        int inlen;
        void *in;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
+       if (!in) {
+               err = -ENOMEM;
                goto out;
+       }
 
        if (enable_uc_lb)
                MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                tirn = tir->tirn;
                err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
        kvfree(in);
        if (err)
                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return err;
 }
index a0987cc5fe4a12af0bf0155ad8f290153898518c..76a3d01a489e00832ee5ff45e2442dbef60b6d6d 100644 (file)
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
-                                   unsigned long *advertising_modes,
-                                   u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+                                   u32 eth_proto_cap, bool ext)
 {
        unsigned long proto_cap = eth_proto_cap;
        struct ptys2ethtool_config *table;
        u32 max_size;
        int proto;
 
-       mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+       table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+       max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+                        ARRAY_SIZE(ptys2legacy_ethtool_table);
+
        for_each_set_bit(proto, &proto_cap, max_size)
                bitmap_or(advertising_modes, advertising_modes,
                          table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
 }
 
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
-                           u8 tx_pause, u8 rx_pause,
-                           struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+                           struct ethtool_link_ksettings *link_ksettings,
+                           bool ext)
 {
        unsigned long *advertising = link_ksettings->link_modes.advertising;
-       ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+       ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
 
        if (rx_pause)
                ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
 
-       ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+       ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
 
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        u8 an_disable_admin;
        u8 an_status;
        u8 connector_type;
+       bool admin_ext;
        bool ext;
        int err;
 
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_admin);
+       /* Fields: eth_proto_admin and ext_eth_proto_admin  are
+        * mutually exclusive. Hence try reading legacy advertising
+        * when extended advertising is zero.
+        * admin_ext indicates how eth_proto_admin should be
+        * interpreted
+        */
+       admin_ext = ext;
+       if (ext && !eth_proto_admin) {
+               eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+                                                     eth_proto_admin);
+               admin_ext = false;
+       }
+
        eth_proto_oper   = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_oper);
        eth_proto_lp        = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
        get_supported(mdev, eth_proto_cap, link_ksettings);
-       get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+       get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+                       admin_ext);
        get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
 
-       ext_requested = (link_ksettings->link_modes.advertising[0] >
-                       MLX5E_PTYS_EXT);
+       ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+                       MLX5E_PTYS_EXT ||
+                       link_ksettings->link_modes.advertising[1]);
        ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
-       /*when ptys_extended_ethernet is set legacy link modes are deprecated */
-       if (ext_requested != ext_supported)
-               return -EPROTONOSUPPORT;
+       ext_requested &= ext_supported;
 
        speed = link_ksettings->base.speed;
        ethtool2ptys_adver_func = ext_requested ?
                                  mlx5e_ethtool2ptys_ext_adver_link :
                                  mlx5e_ethtool2ptys_adver_link;
-       err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+       err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
        if (err) {
                netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
                           __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        if (!an_changes && link_modes == eproto.admin)
                goto out;
 
-       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
        mlx5_toggle_port_link(mdev);
 
 out:
@@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
        struct mlx5e_channel *c;
        int i;
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+           priv->channels.params.xdp_prog)
                return 0;
 
        for (i = 0; i < channels->num; i++) {
index b5fdbd3190d9fa99e6207c25e183354b1d5dad01..f7eb521db580001f48ee021878f5e8d477ad4dbc 100644 (file)
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+       /* We disable csum_complete when XDP is enabled since
+        * XDP programs might manipulate packets which will render
+        * skb->checksum incorrect.
+        */
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
        return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
        return 0;
 }
 
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channels new_channels = {};
+
+       new_channels.params = priv->channels.params;
+       return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        if (!report_failed)
                goto unlock;
 
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+       err = mlx5e_safe_reopen_channels(priv);
        if (err)
                netdev_err(priv->netdev,
-                          "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
                           err);
 
 unlock:
@@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
        enum mlx5e_traffic_types tt;
 
-       rss_params->hfunc = ETH_RSS_HASH_XOR;
+       rss_params->hfunc = ETH_RSS_HASH_TOP;
        netdev_rss_key_fill(rss_params->toeplitz_hash_key,
                            sizeof(rss_params->toeplitz_hash_key));
        mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
index 3dde5c7e0739afd6d04f874290d5a332c97f68cf..c3b3002ff62f073f8c9fff88ea2fb74693474619 100644 (file)
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
-       return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+       if (*proto == htons(ETH_P_IP))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+       if (*proto == htons(ETH_P_IPV6))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+       return false;
 }
 
 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
        rq->stats->ecn_mark += !!rc;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-       const void *fcs_bytes;
-       u32 _fcs_bytes;
-
-       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-                                      ETH_FCS_LEN, &_fcs_bytes);
-
-       return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
        void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+                      struct mlx5e_rq_stats *stats)
+{
+       stats->csum_complete_tail_slow++;
+       skb->csum = csum_block_add(skb->csum,
+                                  skb_checksum(skb, offset, len, 0),
+                                  offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+                 struct mlx5e_rq_stats *stats)
+{
+       u8 tail_padding[MAX_PADDING];
+       int len = skb->len - offset;
+       void *tail;
+
+       if (unlikely(len > MAX_PADDING)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       tail = skb_header_pointer(skb, offset, len, tail_padding);
+       if (unlikely(!tail)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       stats->csum_complete_tail++;
+       skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+                      struct mlx5e_rq_stats *stats)
+{
+       struct ipv6hdr *ip6;
+       struct iphdr   *ip4;
+       int pkt_len;
+
+       switch (proto) {
+       case htons(ETH_P_IP):
+               ip4 = (struct iphdr *)(skb->data + network_depth);
+               pkt_len = network_depth + ntohs(ip4->tot_len);
+               break;
+       case htons(ETH_P_IPV6):
+               ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+               pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+               break;
+       default:
+               return;
+       }
+
+       if (likely(pkt_len >= skb->len))
+               return;
+
+       tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+       /* True when explicitly set via priv flag, or XDP prog is loaded */
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
                goto csum_unnecessary;
 
        /* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_block_add(skb->csum,
-                                                  (__force __wsum)mlx5e_get_fcs(skb),
-                                                  skb->len - ETH_FCS_LEN);
+
+               mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
                stats->csum_complete++;
                return;
        }
 
 csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-                  ((cqe->hds_ip_ext & CQE_L4_OK) ||
-                   (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+                  (cqe->hds_ip_ext & CQE_L4_OK))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
index 1a78e05cbba8168d919bfd45af3378becd3c9b68..b75aa8b8bf04eac8cac464c0c8550013154f6267 100644 (file)
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+               s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
index 4640d4f986f8c6495bc5c94cf22217fb59a64b34..16c3b785f282b109e9b2bc54bd4c136095be9b3f 100644 (file)
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
+       u64 rx_csum_complete_tail;
+       u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_complete_tail;
+       u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
index b4967a0ff8c7ba6ce51826e4ae61964a52bfebb5..d75dc44eb2ff63482820cd4d317af4577fcc1fbd 100644 (file)
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
        return true;
 }
 
+struct ip_ttl_word {
+       __u8    ttl;
+       __u8    protocol;
+       __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+       __be16  payload_len;
+       __u8    nexthdr;
+       __u8    hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+       u32 mask, offset;
+       u8 htype;
+
+       htype = act->mangle.htype;
+       offset = act->mangle.offset;
+       mask = ~act->mangle.mask;
+       /* For IPv4 & IPv6 header check 4 byte word,
+        * to determine that modified fields
+        * are NOT ttl & hop_limit only.
+        */
+       if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+               struct ip_ttl_word *ttl_word =
+                       (struct ip_ttl_word *)&mask;
+
+               if (offset != offsetof(struct iphdr, ttl) ||
+                   ttl_word->protocol ||
+                   ttl_word->check) {
+                       return true;
+               }
+       } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               struct ipv6_hoplimit_word *hoplimit_word =
+                       (struct ipv6_hoplimit_word *)&mask;
+
+               if (offset != offsetof(struct ipv6hdr, payload_len) ||
+                   hoplimit_word->payload_len ||
+                   hoplimit_word->nexthdr) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
                                          u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 {
        const struct flow_action_entry *act;
        bool modify_ip_header;
-       u8 htype, ip_proto;
        void *headers_v;
        u16 ethertype;
+       u8 ip_proto;
        int i;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                    act->id != FLOW_ACTION_ADD)
                        continue;
 
-               htype = act->mangle.htype;
-               if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
-                   htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               if (is_action_keys_supported(act)) {
                        modify_ip_header = true;
                        break;
                }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
        return 0;
 }
 
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
-                                struct ip_tunnel_key *b)
+struct encap_key {
+       struct ip_tunnel_key *ip_tun_key;
+       int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+                                struct encap_key *b)
 {
-       return memcmp(a, b, sizeof(*a));
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+              a->tunnel_type != b->tunnel_type;
 }
 
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
 {
-       return jhash(key, sizeof(*key), 0);
+       return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+                    key->tunnel_type);
 }
 
 
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct ip_tunnel_info *tun_info;
-       struct ip_tunnel_key *key;
+       struct encap_key key, e_key;
        struct mlx5e_encap_entry *e;
        unsigned short family;
        uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        parse_attr = attr->parse_attr;
        tun_info = &parse_attr->tun_info[out_index];
        family = ip_tunnel_info_af(tun_info);
-       key = &tun_info->key;
+       key.ip_tun_key = &tun_info->key;
+       key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
 
-       hash_key = hash_encap_info(key);
+       hash_key = hash_encap_info(&key);
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
-               if (!cmp_encap_info(&e->tun_info.key, key)) {
+               e_key.ip_tun_key = &e->tun_info.key;
+               e_key.tunnel_type = e->tunnel_type;
+               if (!cmp_encap_info(&e_key, &key)) {
                        found = true;
                        break;
                }
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
        if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
            hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
                                            parse_attr, hdrs, extack);
                if (err)
                        return err;
index ecd2c747f7260306fd972478ecce71610918e3b3..8a67fd197b7923f67af1872eae280e0f5e3eb663 100644 (file)
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
 
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
        int err;
 
+       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
        err = esw_create_legacy_vepa_table(esw);
        if (err)
                return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
        /* Star rule to forward all traffic to uplink vport */
        memset(spec, 0, sizeof(*spec));
+       memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
index f2260391be5b952478175de9be8235b3154c4ccf..9b2d78ee22b88333fcc0b043d4234c340dcb3c32 100644 (file)
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 {
        int err;
 
+       memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
        err = esw_create_offloads_fdb_tables(esw, nvports);
index 5cf5f2a9d51fec724f4fac709e29e40f4110d5f7..22a2ef11151441c3abcfc07e7a6e66e292563cae 100644 (file)
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
        return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-                                      spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+                                       spinlock_t *idr_spinlock, u32 swid)
 {
        unsigned long flags;
+       void *ptr;
 
        spin_lock_irqsave(idr_spinlock, flags);
-       idr_remove(idr, swid);
+       ptr = idr_remove(idr, swid);
        spin_unlock_irqrestore(idr_spinlock, flags);
+       return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
        kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-       struct mlx5_fpga_tls_command_context cmd;
-       u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_device *fdev,
                                  struct mlx5_fpga_tls_command_context *cmd,
                                  struct mlx5_fpga_dma_buf *resp)
 {
-       struct mlx5_teardown_stream_context *ctx =
-                   container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
        if (resp) {
                u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
-               else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-                       mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-                                                  &fdev->tls->tx_idr_spinlock,
-                                                  ctx->swid);
-               else
-                       mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-                                                  &fdev->tls->rx_idr_spinlock,
-                                                  ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 
        rcu_read_lock();
        flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-       rcu_read_unlock();
+       if (unlikely(!flow)) {
+               rcu_read_unlock();
+               WARN_ONCE(1, "Received NULL pointer for handle\n");
+               kfree(buf);
+               return -EINVAL;
+       }
        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+       rcu_read_unlock();
 
        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
        MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        buf->complete = mlx_tls_kfree_complete;
 
        ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+       if (ret < 0)
+               kfree(buf);
 
        return ret;
 }
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
 {
-       struct mlx5_teardown_stream_context *ctx;
+       struct mlx5_fpga_tls_command_context *ctx;
        struct mlx5_fpga_dma_buf *buf;
        void *cmd;
 
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        if (!ctx)
                return;
 
-       buf = &ctx->cmd.buf;
+       buf = &ctx->buf;
        cmd = (ctx + 1);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
        MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-       ctx->swid = swid;
-       mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+       mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
                               mlx5_fpga_tls_teardown_completion);
 }
 
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;
 
-       rcu_read_lock();
        if (direction_sx)
-               flow = idr_find(&tls->tx_idr, swid);
+               flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+                                                 &tls->tx_idr_spinlock,
+                                                 swid);
        else
-               flow = idr_find(&tls->rx_idr, swid);
-
-       rcu_read_unlock();
+               flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+                                                 &tls->rx_idr_spinlock,
+                                                 swid);
 
        if (!flow) {
                mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
                return;
        }
 
+       synchronize_rcu(); /* before kfree(flow) */
        mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
 
index 70cc906a102b2dde87d161385126f43da4948266..76716419370df9738384675c3dc36e5d0ff6cf27 100644 (file)
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
-               .mr_cache[16]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[17]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[18]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[19]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
-               .mr_cache[20]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
        },
 };
 
index d23d53c0e2842bc0e28a179d3ef6457a3369cee7..f26a4ca293637b48ccc9aa8b9e55f375f767eedb 100644 (file)
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
-       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
        if (!emad_wq)
                return -ENOMEM;
        mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
 {
        int err;
 
-       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
-       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
index 9a79b5e1159743a9b619407cd3b9b9af065c97ca..d633bef5f10512269547c00f718f552720dd29a3 100644 (file)
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+       {MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };
 
 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };
 
 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
        MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
+       MLXSW_SP_SB_PM(10000, 90000),
 };
 
 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
index 52fed8c7bf1edf61aa1c3b61c04f71e8dfc9717f..902e766a8ed33eabbe0b11075284878ba3fb3ad4 100644 (file)
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
        /* A RIF is not created for macvlan netdevs. Their MAC is used to
         * populate the FDB
         */
-       if (netif_is_macvlan(dev))
+       if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
                return 0;
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
index f6ce386c30367f08a86153b8ac691a1480a1588a..50111f228d77228758d5e0ad634b1848712e11d4 100644 (file)
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
        u16 fid_index;
        int err = 0;
 
-       if (switchdev_trans_ph_prepare(trans))
+       if (switchdev_trans_ph_commit(trans))
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
index a1d0d6e4253324f702f6eecae804fdd574b5f32e..d715ef4fc92fdb61a89122793133db147c1e4f59 100644 (file)
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
                              struct netdev_hw_addr *hw_addr)
 {
        struct ocelot *ocelot = port->ocelot;
-       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
        if (!ha)
                return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                       ETH_GSTRING_LEN);
 }
 
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-       struct delayed_work *del_work = to_delayed_work(work);
-       struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
        int i, j;
 
        mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
                }
        }
 
-       cancel_delayed_work(&ocelot->stats_work);
+       mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct ocelot *ocelot = container_of(del_work, struct ocelot,
+                                            stats_work);
+
+       ocelot_update_stats(ocelot);
+
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
-
-       mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
        int i;
 
        /* check and update now */
-       ocelot_check_stats(&ocelot->stats_work.work);
+       ocelot_update_stats(ocelot);
 
        /* Copy all counters */
        for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
                                 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
                                 ANA_CPUQ_8021_CFG, i);
 
-       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
        return 0;
index 7cde387e5ec62a0c36f070a163a6e5b9c38a6a4b..51cd57ab3d9584d3d67508cc94aa6c9590aa11d1 100644 (file)
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
+                       memblock = NULL;
                        goto exit;
                }
 
index eeda4ed98333afbf5671c1fd6d4ad69bc443a9e4..e336f6ee94f5c6d3a9108b85a6fb0529ecb7031d 100644 (file)
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
-               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
-               NFP_FL_PUSH_VLAN_CFI;
+               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
index 4fcaf11ed56ed6f2ee89dde5313962676fba9f92..0ed51e79db00ebb90f0f9e8b2d9e74631c7ed4dc 100644 (file)
@@ -26,7 +26,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP    BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO      GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI       BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT   BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID       GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB                GENMASK(31, 12)
@@ -82,7 +82,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX      GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO          GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI           BIT(12)
 #define NFP_FL_PUSH_VLAN_VID           GENMASK(11, 0)
 
 #define IPV6_FLOW_LABEL_MASK           cpu_to_be32(0x000fffff)
index e03c8ef2c28c525b7fb44b4ce8f3e294d3d4fef6..9b8b843d0340374a6372501e2c69eb298563be75 100644 (file)
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 
                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
-               if (match.key->vlan_id || match.key->vlan_priority) {
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.key->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.key->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       ext->tci = cpu_to_be16(tmp_tci);
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.mask->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.mask->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       msk->tci = cpu_to_be16(tmp_tci);
-               }
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.key->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.key->vlan_id);
+               ext->tci = cpu_to_be16(tmp_tci);
+
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.mask->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.mask->vlan_id);
+               msk->tci = cpu_to_be16(tmp_tci);
        }
 }
 
index d2c803bb4e562dd3805aaea86e689e5f682ce9ed..94d228c044963b8494c737aa1c46700d3598fd11 100644 (file)
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = dev_queue_xmit(skb);
        nfp_repr_inc_tx_stats(netdev, len, ret);
 
-       return ret;
+       return NETDEV_TX_OK;
 }
 
 static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
        netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
 
-       netdev->priv_flags |= IFF_NO_QUEUE;
+       netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
        netdev->features |= NETIF_F_LLTX;
 
        if (nfp_app_has_tc(app)) {
index 43a57ec296fd9c6e55ec667f34b6ddd98880e41f..127c89b22ef0da7d60680481467517f83ca7bc72 100644 (file)
@@ -431,12 +431,16 @@ struct qed_qm_info {
        u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT       1
+
 struct qed_db_recovery_info {
        struct list_head list;
 
        /* Lock to protect the doorbell recovery mechanism list */
        spinlock_t lock;
+       bool dorq_attn;
        u32 db_recovery_counter;
+       unsigned long overflow;
 };
 
 struct storm_stats {
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
 
 /* doorbell recovery mechanism */
 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
 
 /* Other Linux specific common definitions */
index 9df8c4b3b54e3dc71fdca5a759a9d98b303d8ac7..866cdc86a3f27c879d4364089597ea499a0b1714 100644 (file)
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
 
 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-                             void __iomem *db_addr, void *db_data)
+                             void __iomem *db_addr,
+                             enum qed_db_rec_width db_width,
+                             void *db_data)
 {
+       u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
        /* Make sure doorbell address is within the doorbell bar */
        if (db_addr < cdev->doorbells ||
-           (u8 __iomem *)db_addr >
+           (u8 __iomem *)db_addr + width >
            (u8 __iomem *)cdev->doorbells + cdev->db_size) {
                WARN(true,
                     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
        }
 
        /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+       if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
                return -EINVAL;
 
        /* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
                return 0;
        }
 
-       /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-               return -EINVAL;
-
        /* Obtain hwfn from doorbell address */
        p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
 
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
 
 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-                                struct qed_db_recovery_entry *db_entry,
-                                enum qed_db_rec_exec db_exec)
-{
-       if (db_exec != DB_REC_ONCE) {
-               /* Print according to width */
-               if (db_entry->db_width == DB_REC_WIDTH_32B) {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %x\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u32 *)db_entry->db_data);
-               } else {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %llx\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u64 *)(db_entry->db_data));
-               }
+                                struct qed_db_recovery_entry *db_entry)
+{
+       /* Print according to width */
+       if (db_entry->db_width == DB_REC_WIDTH_32B) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %x\n",
+                          db_entry->db_addr,
+                          *(u32 *)db_entry->db_data);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %llx\n",
+                          db_entry->db_addr,
+                          *(u64 *)(db_entry->db_data));
        }
 
        /* Sanity */
        if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-                              db_entry->db_data))
+                              db_entry->db_width, db_entry->db_data))
                return;
 
        /* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
        wmb();
 
        /* Ring the doorbell */
-       if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-               if (db_entry->db_width == DB_REC_WIDTH_32B)
-                       DIRECT_REG_WR(db_entry->db_addr,
-                                     *(u32 *)(db_entry->db_data));
-               else
-                       DIRECT_REG_WR64(db_entry->db_addr,
-                                       *(u64 *)(db_entry->db_data));
-       }
+       if (db_entry->db_width == DB_REC_WIDTH_32B)
+               DIRECT_REG_WR(db_entry->db_addr,
+                             *(u32 *)(db_entry->db_data));
+       else
+               DIRECT_REG_WR64(db_entry->db_addr,
+                               *(u64 *)(db_entry->db_data));
 
        /* Flush the write combined buffer. Next doorbell may come from a
         * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 }
 
 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
        struct qed_db_recovery_entry *db_entry = NULL;
 
-       if (db_exec != DB_REC_ONCE) {
-               DP_NOTICE(p_hwfn,
-                         "Executing doorbell recovery. Counter was %d\n",
-                         p_hwfn->db_recovery_info.db_recovery_counter);
+       DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+                 p_hwfn->db_recovery_info.db_recovery_counter);
 
-               /* Track amount of times recovery was executed */
-               p_hwfn->db_recovery_info.db_recovery_counter++;
-       }
+       /* Track amount of times recovery was executed */
+       p_hwfn->db_recovery_info.db_recovery_counter++;
 
        /* Protect the list */
        spin_lock_bh(&p_hwfn->db_recovery_info.lock);
        list_for_each_entry(db_entry,
-                           &p_hwfn->db_recovery_info.list, list_entry) {
-               qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-               if (db_exec == DB_REC_ONCE)
-                       break;
-       }
-
+                           &p_hwfn->db_recovery_info.list, list_entry)
+               qed_db_recovery_ring(p_hwfn, db_entry);
        spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }
 
index e23980e301b6a2be7f015d6a0c6f6aaadbf788b3..8848d5bed6e5c58a188900bf9ad5710529d66b51 100644 (file)
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
        u32 count = QED_DB_REC_COUNT;
        u32 usage = 1;
 
+       /* Flush any pending (e)dpms as they may never arrive */
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
        /* wait for usage to zero or count to run out. This is necessary since
         * EDPM doorbell transactions can take multiple 64b cycles, and as such
         * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 overflow;
+       u32 attn_ovfl, cur_ovfl;
        int rc;
 
-       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-       DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-       if (!overflow) {
-               qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+       attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+                                      &p_hwfn->db_recovery_info.overflow);
+       cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!cur_ovfl && !attn_ovfl)
                return 0;
-       }
 
-       if (qed_edpm_enabled(p_hwfn)) {
+       DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+                 attn_ovfl, cur_ovfl);
+
+       if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
                rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
                if (rc)
                        return rc;
        }
 
-       /* Flush any pending (e)dpm as they may never arrive */
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
        /* Release overflow sticky indication (stop silently dropping everything) */
        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
        /* Repeat all last doorbells (doorbell drop recovery) */
-       qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+       qed_db_recovery_execute(p_hwfn);
 
        return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
        struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+       u32 overflow;
        int rc;
 
-       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!overflow)
+               goto out;
+
+       /* Run PF doorbell recovery in next periodic handler */
+       set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+       if (!p_hwfn->db_bar_no_edpm) {
+               rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+               if (rc)
+                       goto out;
+       }
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+       /* Schedule the handler even if overflow was not detected */
+       qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+       struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
        /* int_sts may be zero since all PFs were interrupted for doorbell
         * overflow but another one already handled it. Can abort here. If
         * This PF also requires overflow recovery we will be interrupted again.
         * The masked almost full indication may also be set. Ignoring.
         */
+       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
        if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
                return 0;
 
+       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
        /* check if db_drop or overflow happened */
        if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
                       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
                          GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
                          first_drop_reason, all_drops_reason);
 
-               rc = qed_db_rec_handler(p_hwfn, p_ptt);
-               qed_periodic_db_rec_start(p_hwfn);
-               if (rc)
-                       return rc;
-
                /* Clear the doorbell drop details and prepare for next drop */
                qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
        return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->db_recovery_info.dorq_attn = true;
+       qed_dorq_attn_overflow(p_hwfn);
+
+       return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->db_recovery_info.dorq_attn)
+               goto out;
+
+       /* Call DORQ callback if the attention was missed */
+       qed_dorq_attn_cb(p_hwfn);
+out:
+       p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have a some 'special'
  * identifiers for sources that changed meaning between adapters.
  */
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                }
        }
 
+       /* Handle missed DORQ attention */
+       qed_dorq_attn_handler(p_hwfn);
+
        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                                    GTT_BAR0_MAP_REG_IGU_CMD +
index 1f356ed4f761e72486df4b57b1994e8f2dd89032..d473b522afc5137f69edece72c535397623ad05d 100644 (file)
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
  *
  * @param p_hwfn
  * @param p_ptt
index f164d4acebcb43a4cd7b2858ad31e95e80467b74..6de23b56b2945c55118cbc3464a46881031583eb 100644 (file)
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        }
 }
 
-#define QED_PERIODIC_DB_REC_COUNT              100
+#define QED_PERIODIC_DB_REC_COUNT              10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS                100
 #define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
index 9faaa6df78ed99b8b20b7f78b9efa9d4113b74e3..2f318aaf2b05d8145d4a0a4c45421fbff0bad455 100644 (file)
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
-                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
index 5f3f42a25361679220fcc55224fcb2aa46adec03..bddb2b5982dcfedff2e8139741be978a1fc40e95 100644 (file)
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
-               rc = -EINVAL;
                DP_ERR(edev, "PTP clock registration failed\n");
+               qede_ptp_disable(edev);
+               rc = -EINVAL;
                goto err2;
        }
 
        return 0;
 
-err2:
-       qede_ptp_disable(edev);
-       ptp->clock = NULL;
 err1:
        kfree(ptp);
+err2:
        edev->ptp = NULL;
 
        return rc;
index 0c443ea98479ac0971a6e36c28bd8bde2f080bfa..374a4d4371f99f23fe9d4507e0cbd33e177fbd85 100644 (file)
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
        u16 board_type;
        u16 supported_type;
 
-       u16 link_speed;
+       u32 link_speed;
        u16 link_duplex;
        u16 link_autoneg;
        u16 module_type;
index 7562ccbbb39af59a2ba0e4078b2f43b2a7376809..ed651dde6ef9ee8970bc68acb9c6fb9282411189 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
 #include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 
@@ -5460,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
        tp->cp_cmd |= PktCntrDisable | INTT_1;
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 
-       RTL_W16(tp, IntrMitigate, 0x5151);
+       RTL_W16(tp, IntrMitigate, 0x5100);
 
        /* Work around for RxFIFO overflow. */
        if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -7352,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
+       /* Disable ASPM completely as that cause random device stop working
+        * problems as well as full system hangs for some PCIe devices users.
+        */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
        if (rc < 0) {
index 40d6356a7e73c213f0d1d073387b8605bb4f3726..3dfb07a78952533420da7cb1cb6b87b171d91661 100644 (file)
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                          int bfsize)
 {
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                       << ERDES1_BUFFER2_SIZE_SHIFT)
-                  & ERDES1_BUFFER2_SIZE_MASK);
+       if (bfsize == BUF_SIZE_16KiB)
+               p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                               << ERDES1_BUFFER2_SIZE_SHIFT)
+                          & ERDES1_BUFFER2_SIZE_MASK);
 
        if (end)
                p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                               << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+       if (bfsize >= BUF_SIZE_2KiB) {
+               int bfsize2;
+
+               bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+               p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                           & RDES1_BUFFER2_SIZE_MASK);
+       }
 
        if (end)
                p->des1 |= cpu_to_le32(RDES1_END_RING);
index 7fbb6a4dbf5107723f16b825e7a3b1577c97254a..e061e9f5fad71f065440605646da812d1b9daf51 100644 (file)
@@ -296,7 +296,7 @@ exit:
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
        dwmac4_set_rx_owner(p, disable_rx_ic);
 }
index 1d858fdec99718ec63a5fa1064fe9dd99670d2e0..98fa471da7c0f2764729f98c7044f52e068c1db9 100644 (file)
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
        dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
index 5ef91a790f9d16fbd122f71e130cf7ecf5249a68..5202d6ad79194b0ed9134a7905d0aaa4309c6822 100644 (file)
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;
 
+       if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+               stats->rx_length_errors++;
+               return discard_frame;
+       }
+
        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
         * It doesn't match with the information reported into the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
-       ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-                                !!(rdes0 & RDES0_FRAME_TYPE),
-                                !!(rdes0 & ERDES0_RX_MAC_ADDR));
+       if (likely(ret == good_frame))
+               ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+                                        !!(rdes0 & RDES0_FRAME_TYPE),
+                                        !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+       p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
-               ehn_desc_rx_set_on_ring(p, end);
+               ehn_desc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
index 92b8944f26e3c8566d9e68de4820d06231ca10bd..5bb00234d961c6a5a2385c90bc3fdf54ff96e4ca 100644 (file)
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
        /* DMA RX descriptor ring initialization */
        void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                       int end);
+                       int end, int bfsize);
        /* DMA TX descriptor ring initialization */
        void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
        /* Invoked by the xmit function to prepare the tx descriptor */
index de65bb29feba967cc7a0d6ff3184998f359dde34..b7dd4e3c760d82da1439fb0d8dd9d418c34256a2 100644 (file)
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                return dma_own;
 
        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-               pr_warn("%s: Oversized frame spanned multiple buffers\n",
-                       __func__);
                stats->rx_length_errors++;
                return discard_frame;
        }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                              int end)
+                              int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+       p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
        else
-               ndesc_rx_set_on_ring(p, end);
+               ndesc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
index 6a2e1031a62ae3c4d16f7f09f4d09481ccfa325d..a26e36dbb5df0deff58ef23a18df287c0ceef330 100644 (file)
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
 }
 
 /**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
-       unsigned int entry = rx_q->cur_rx;
+       unsigned int next_entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
-       unsigned int next_entry;
        unsigned int count = 0;
        bool xmac;
 
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
-               int status;
+               int entry, status;
                struct dma_desc *p;
                struct dma_desc *np;
 
+               entry = next_entry;
+
                if (priv->extend_desc)
                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         *  ignored
                         */
                        if (frame_len > priv->dma_buf_sz) {
-                               netdev_err(priv->dev,
-                                          "len %d larger than size (%d)\n",
-                                          frame_len, priv->dma_buf_sz);
+                               if (net_ratelimit())
+                                       netdev_err(priv->dev,
+                                                  "len %d larger than size (%d)\n",
+                                                  frame_len, priv->dma_buf_sz);
                                priv->dev->stats.rx_length_errors++;
-                               break;
+                               continue;
                        }
 
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                                dev_warn(priv->device,
                                                         "packet dropped\n");
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
 
                                dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        } else {
                                skb = rx_q->rx_skbuff[entry];
                                if (unlikely(!skb)) {
-                                       netdev_err(priv->dev,
-                                                  "%s: Inconsistent Rx chain\n",
-                                                  priv->dev->name);
+                                       if (net_ratelimit())
+                                               netdev_err(priv->dev,
+                                                          "%s: Inconsistent Rx chain\n",
+                                                          priv->dev->name);
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
                                prefetch(skb->data - NET_IP_ALIGN);
                                rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
                }
-               entry = next_entry;
        }
 
        stmmac_rx_refill(priv, queue);
index e859ae2e42d5a152a567de048e898eeafa99fcb5..49f41b64077bb9877d74ea40bbddffea5fe835d4 100644 (file)
@@ -987,6 +987,7 @@ struct netvsc_device {
 
        wait_queue_head_t wait_drain;
        bool destroy;
+       bool tx_disable; /* if true, do not wake up queue again */
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
index 813d195bbd57fed2ef96ea708b637ca8197be458..e0dce373cdd9d875ded78bff545d76b32920f69c 100644 (file)
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
+       net_device->tx_disable = false;
 
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-               if (netif_tx_queue_stopped(txq) &&
+               if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
-               if (atomic_read(&nvchan->queue_sends) < 1) {
+               if (atomic_read(&nvchan->queue_sends) < 1 &&
+                   !net_device->tx_disable) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                        ret = -ENOSPC;
index cf4897043e833618b5f5d6d452914ddbeff0e00d..b20fb0fb595bde3d7455e2c6214812fef6732017 100644 (file)
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
        rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                            struct net_device *ndev)
+{
+       nvscdev->tx_disable = false;
+       virt_wmb(); /* ensure queue wake up mechanism is on */
+
+       netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
        rdev = nvdev->extension;
        if (!rdev->link_state) {
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
+               netvsc_tx_enable(nvdev, net);
        }
 
        if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
        }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+       if (nvscdev) {
+               nvscdev->tx_disable = true;
+               virt_wmb(); /* ensure txq will not wake up after stop */
+       }
+
+       netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
 
-       netif_tx_disable(net);
+       netvsc_tx_disable(nvdev, net);
 
        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
 
        /* If device was up (receiving) then shutdown */
        if (netif_running(ndev)) {
-               netif_tx_disable(ndev);
+               netvsc_tx_disable(nvdev, ndev);
 
                ret = rndis_filter_close(nvdev);
                if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
-                       netif_tx_wake_all_queues(net);
+                       netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
index 6ed96fdfd96dd5a858e8416fcfcfa8c78c628e2a..9ce61b019aadb55df9177301e0752295d61ba096 100644 (file)
@@ -1246,6 +1246,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                goto err_option_port_add;
        }
 
+       /* set promiscuity level to new slave */
+       if (dev->flags & IFF_PROMISC) {
+               err = dev_set_promiscuity(port_dev, 1);
+               if (err)
+                       goto err_set_slave_promisc;
+       }
+
+       /* set allmulti level to new slave */
+       if (dev->flags & IFF_ALLMULTI) {
+               err = dev_set_allmulti(port_dev, 1);
+               if (err) {
+                       if (dev->flags & IFF_PROMISC)
+                               dev_set_promiscuity(port_dev, -1);
+                       goto err_set_slave_promisc;
+               }
+       }
+
        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1279,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
        return 0;
 
+err_set_slave_promisc:
+       __team_option_inst_del_port(team, port);
+
 err_option_port_add:
        team_upper_dev_unlink(team, port);
 
@@ -1307,6 +1327,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
        team_port_disable(team, port);
        list_del_rcu(&port->list);
+
+       if (dev->flags & IFF_PROMISC)
+               dev_set_promiscuity(port_dev, -1);
+       if (dev->flags & IFF_ALLMULTI)
+               dev_set_allmulti(port_dev, -1);
+
        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
index 74bebbdb4b158791135410d00591e84eb9da7073..9195f3476b1d7924de0acedab5c90d2f22533e5a 100644 (file)
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
index 7c1430ed02445b6e6f13c663b555ef550276c899..cd15c32b2e43686925161ad48b080842f00ec19c 100644 (file)
@@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev)
 
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
+       dev->priv_flags |= IFF_NO_RX_HANDLER;
 
-       dev->min_mtu = 0;
-       dev->max_mtu = 0;
+       /* VRF devices do not care about MTU, but if the MTU is set
+        * too low then the ipv4 and ipv6 protocols are disabled
+        * which breaks networking.
+        */
+       dev->min_mtu = IPV6_MIN_MTU;
+       dev->max_mtu = ETH_MAX_MTU;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
index a20ea270d519be335b9b0086b1d5f9c8ea3d385d..1acc622d218333ac131666536b1077fb1b9ee808 100644 (file)
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
                        num_msdus++;
                        num_bytes += ret;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ieee80211_txq_schedule_end(hw, txq->ac);
 
                record->num_msdus = cpu_to_le16(num_msdus);
index b73c23d4ce86d0cd0631a4838b4ce3a150e34f49..41e89db244d20e67f27d3f226bbceba750962fb0 100644 (file)
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
                        if (ret < 0)
                                break;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ath10k_htt_tx_txq_update(hw, txq);
                if (ret == -EBUSY)
                        break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
                if (ret < 0)
                        break;
        }
-       ieee80211_return_txq(hw, txq);
+       ieee80211_return_txq(hw, txq, false);
        ath10k_htt_tx_txq_update(hw, txq);
 out:
        ieee80211_txq_schedule_end(hw, ac);
index 773d428ff1b03328ca43c1c8db74103d8d846444..b17e1ca40995eab7b0f80c479f0cff7381801e76 100644 (file)
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                goto out;
 
        while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+               bool force;
+
                tid = (struct ath_atx_tid *)queue->drv_priv;
 
                ret = ath_tx_sched_aggr(sc, txq, tid);
                ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
 
-               ieee80211_return_txq(hw, queue);
+               force = !skb_queue_empty(&tid->retry_q);
+               ieee80211_return_txq(hw, queue, force);
        }
 
 out:
index fdc56f821b5ac0961f8b503d5f918999a01e214e..eb6defb6d0cd9b1e61e74c2d3e173c8a9d916da8 100644 (file)
@@ -82,6 +82,7 @@
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE          "iwlwifi-QuZ-a0-hr-b0-"
 #define IWL_QNJ_B_JF_B_FW_PRE          "iwlwifi-QuQnj-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE                        "iwlwifi-cc-a0-"
 #define IWL_22000_SO_A_JF_B_FW_PRE     "iwlwifi-so-a0-jf-b0-"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
-       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+       IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
        IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api)            \
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
-const struct iwl_cfg iwl22260_2ax_cfg = {
-       .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+       .name = "Intel(R) Wi-Fi 6 AX101",
+       .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+       .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650x_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650w_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index f119c49cd39cd516c09459f4f145f898bd4fe38b..d7380016f1c0d4f4d85fd9b063f7344f5bd954c0 100644 (file)
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
        if (!range) {
                IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
                        le32_to_cpu(reg->region_id), type);
+               memset(*data, 0, le32_to_cpu((*data)->len));
                return;
        }
 
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
                if (range_size < 0) {
                        IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
                                le32_to_cpu(reg->region_id), type);
+                       memset(*data, 0, le32_to_cpu((*data)->len));
                        return;
                }
                range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
 
        trigger = fwrt->dump.active_trigs[id].trig;
 
-       size = sizeof(*dump_file);
-       size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+       size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
        if (!size)
                return NULL;
 
+       size += sizeof(*dump_file);
+
        dump_file = vzalloc(size);
        if (!dump_file)
                return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
        iwl_dump_error_desc->len = 0;
 
        ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
-       if (ret) {
+       if (ret)
                kfree(iwl_dump_error_desc);
-       } else {
-               set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
-               /* trigger nmi to halt the fw */
-               iwl_force_nmi(fwrt->trans);
-       }
+       else
+               iwl_trans_sync_nmi(fwrt->trans);
 
        return ret;
 }
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
 
 void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
 {
-       /* if the wait event timeout elapses instead of wake up then
-        * the driver did not receive NMI interrupt and can not assume the FW
-        * is halted
-        */
-       int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
-                                    !test_bit(STATUS_FW_WAIT_DUMP,
-                                              &fwrt->trans->status),
-                                    msecs_to_jiffies(2000));
-       if (!ret) {
-               /* failed to receive NMI interrupt, assuming the FW is stuck */
-               set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
-               clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-       }
-
-       /* Assuming the op mode mutex is held at this point */
        iwl_fw_dbg_collect_sync(fwrt);
 
        iwl_trans_stop_device(fwrt->trans);
index 7adf4e4e841a92f3ae98534b011175e3cfb00ce7..12310e3d2fc5aa7b544b08c95ad3086c3de799c1 100644 (file)
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
        fwrt->ops_ctx = ops_ctx;
        INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
        iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
-       init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 
index f5f87773667b0bd2b68ccadcddcf3184777c04f9..93070848280a4e425f95de9842b5bbc42000fcfb 100644 (file)
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
index aea6d03e545a1db063f795c49a2b2c123b954452..e539bc94eff7fdcee8e4c979fe0581710768b275 100644 (file)
@@ -327,6 +327,7 @@ enum {
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ            (0x0000360)
 #define CSR_HW_REV_TYPE_QNJ_B0         (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ            (0x0000354)
 #define CSR_HW_REV_TYPE_HR_CDB         (0x0000340)
 #define CSR_HW_REV_TYPE_SO             (0x0000370)
 #define CSR_HW_REV_TYPE_TY             (0x0000420)
index bbebbf3efd57db1a2101a5e6cc02cf2095666dd7..d8690acee40c0c45668f6480b10daff295633500 100644 (file)
@@ -338,7 +338,6 @@ enum iwl_d3_status {
  *     are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
  */
 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
-       STATUS_FW_WAIT_DUMP,
 };
 
 static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 u32 dump_mask);
        void (*debugfs_cleanup)(struct iwl_trans *trans);
+       void (*sync_nmi)(struct iwl_trans *trans);
 };
 
 /**
@@ -831,7 +830,6 @@ struct iwl_trans {
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
        unsigned int error_event_table_tlv_status;
-       wait_queue_head_t fw_halt_waitq;
 
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
        /* prevent double restarts due to the same erroneous FW */
        if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
                iwl_op_mode_nic_error(trans->op_mode);
+}
 
-       if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
-               wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+       if (trans->ops->sync_nmi)
+               trans->ops->sync_nmi(trans);
 }
 
 /*****************************************************
index 3a92c09d46926fa6d2d565df9e1479e7988ccde1..6a3b11dd2edf53cf4352178a56189c1ebe99de66 100644 (file)
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
-       kfree(mvmvif->ap_wep_key);
-       mvmvif->ap_wep_key = NULL;
-
        mutex_unlock(&mvm->mutex);
 }
 
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_update_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
-               /* if wep is used, need to set the key for the station now */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       mvm_sta->wep_key =
-                               kmemdup(mvmvif->ap_wep_key,
-                                       sizeof(*mvmvif->ap_wep_key) +
-                                       mvmvif->ap_wep_key->keylen,
-                                       GFP_KERNEL);
-                       if (!mvm_sta->wep_key) {
-                               ret = -ENOMEM;
-                               goto out_unlock;
-                       }
-
-                       ret = iwl_mvm_set_sta_key(mvm, vif, sta,
-                                                 mvm_sta->wep_key,
-                                                 STA_KEY_IDX_INVALID);
-               } else {
-                       ret = 0;
-               }
+               ret = 0;
 
                /* we don't support TDLS during DCM */
                if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                                   NL80211_TDLS_DISABLE_LINK);
                }
 
-               /* Remove STA key if this is an AP using WEP */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
-                                                           mvm_sta->wep_key);
-
-                       if (!ret)
-                               ret = rm_ret;
-                       kfree(mvm_sta->wep_key);
-                       mvm_sta->wep_key = NULL;
-               }
-
                if (unlikely(ret &&
                             test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
                                      &mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta, u32 changed)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (changed & (IEEE80211_RC_BW_CHANGED |
+                      IEEE80211_RC_SUPP_RATES_CHANGED |
+                      IEEE80211_RC_NSS_CHANGED))
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
 
        if (vif->type == NL80211_IFTYPE_STATION &&
            changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       struct iwl_mvm_vif *mvmvif =
-                               iwl_mvm_vif_from_mac80211(vif);
-
-                       mvmvif->ap_wep_key = kmemdup(key,
-                                                    sizeof(*key) + key->keylen,
-                                                    GFP_KERNEL);
-                       if (!mvmvif->ap_wep_key)
-                               return -ENOMEM;
-               }
-
-               if (vif->type != NL80211_IFTYPE_STATION)
-                       return 0;
-               break;
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       break;
+               if (iwl_mvm_has_new_tx_api(mvm))
+                       return -EOPNOTSUPP;
+               /* support HW crypto on TX */
+               return 0;
        default:
                /* currently FW supports only one optional cipher scheme */
                if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
+                       key->hw_key_idx = STA_KEY_IDX_INVALID;
                        /*
                         * can't add key for RX, but we don't need it
-                        * in the device for TX so still return 0
+                        * in the device for TX so still return 0,
+                        * unless we have new TX API where we cannot
+                        * put key material into the TX_CMD
                         */
-                       key->hw_key_idx = STA_KEY_IDX_INVALID;
-                       ret = 0;
+                       if (iwl_mvm_has_new_tx_api(mvm))
+                               ret = -EOPNOTSUPP;
+                       else
+                               ret = 0;
                }
 
                break;
index bca6f6b536d9754133c9ac8ab00e271c1f2bb06f..a50dc53df08698ff0afafbef8f74692007afc17b 100644 (file)
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
        netdev_features_t features;
 
        struct iwl_probe_resp_data __rcu *probe_resp_data;
-       struct ieee80211_key_conf *ap_wep_key;
 };
 
 static inline struct iwl_mvm_vif *
index 498c315291cfac599bd23df37fad6c3e28541201..98d123dd7177845ff1676df2dbbdb2c492d89f6f 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
                iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
                list_del_init(&mvmtxq->list);
+               local_bh_disable();
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+               local_bh_enable();
        }
 
        mutex_unlock(&mvm->mutex);
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                                   timeout);
 
-       if (mvmvif->ap_wep_key) {
-               u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-               __set_bit(key_offset, mvm->fw_key_table);
-
-               if (key_offset == STA_KEY_IDX_INVALID)
-                       return -ENOSPC;
-
-               ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                          mvmvif->ap_wep_key, true, 0, NULL, 0,
-                                          key_offset, 0);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
 
-       if (mvmvif->ap_wep_key) {
-               int i;
-
-               if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
-                                         mvm->fw_key_table)) {
-                       IWL_ERR(mvm, "offset %d not used in fw key table.\n",
-                               mvmvif->ap_wep_key->hw_key_idx);
-                       return -ENOENT;
-               }
-
-               /* track which key was deleted last */
-               for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-                       if (mvm->fw_key_deleted[i] < U8_MAX)
-                               mvm->fw_key_deleted[i]++;
-               }
-               mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
-               ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                              mvmvif->ap_wep_key, true);
-               if (ret)
-                       return ret;
-       }
-
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 79700c7310a1a3cf38162d6ed3c582fe7c6ed67c..b4d4071b865db90dc81fd8c2db7d410b66686f30 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
  * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  * @tx_ant: the index of the antenna to use for data tx to this station. Only
  *     used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
-       struct ieee80211_key_conf *wep_key;
-
        u8 reserved_queue;
 
        /* Temporary, until the new TLC will control the Tx protection */
index 2b94e4cef56cfc5fd25a0343f189116ca0e78c96..9f1af8da9dc181eb1dcc48b2f0fb1d1b7ffa9836 100644 (file)
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
 
-       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
        {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
 
        {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
index bf8b61a476c5b017fac5a94e6cd7eb894116d169..59213164f35e3814cd0d7618cf8f6f54fd873f5b 100644 (file)
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
index fe8269d023def832e97701bc7c1bd0c57df05a96..79c1dc05f9488ddae52b2d820897b0d255afb1d3 100644 (file)
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .unref = iwl_trans_pcie_unref,                                  \
        .dump_data = iwl_trans_pcie_dump_data,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
-       .d3_resume = iwl_trans_pcie_d3_resume
+       .d3_resume = iwl_trans_pcie_d3_resume,                          \
+       .sync_nmi = iwl_trans_pcie_sync_nmi
 
 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS                                               \
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
                if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+                       trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
                        trans->cfg = &iwl_ax101_cfg_qu_hr;
                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  (trans->cfg != &iwl22260_2ax_cfg ||
+                  (trans->cfg != &iwl_ax200_cfg_cc ||
                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
                u32 hw_status;
 
@@ -3637,7 +3642,7 @@ out_no_pci:
        return ERR_PTR(ret);
 }
 
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
        unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
 
index 88530d9f4a54ced4e6c8d081cedaf7b0354cde8b..38d11033898716b3e9c1c5fae581c692d4ae44fe 100644 (file)
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                               cmd_str);
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 9fbd37d23e851caf0042ef2861263815893969cb..7be73e2c4681cadc48ed5a838068d419196b413f 100644 (file)
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               iwl_get_cmd_string(trans, cmd->id));
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 0838af04d681a3e37f71b8d47cb58435ffbac5fb..524eb580599571c9c4e4ee782d760e0287389ddd 100644 (file)
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        struct net *net;
-       int idx;
+       int idx, i;
        int n_limits = 0;
 
        if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                goto failed_hw;
        }
 
+       data->if_combination.max_interfaces = 0;
+       for (i = 0; i < n_limits; i++)
+               data->if_combination.max_interfaces +=
+                       data->if_limits[i].max;
+
        data->if_combination.n_limits = n_limits;
-       data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
 
-       hw->wiphy->iface_combinations = &data->if_combination;
-       hw->wiphy->n_iface_combinations = 1;
+       /*
+        * If we actually were asked to support combinations,
+        * advertise them - if there's only a single thing like
+        * only IBSS then don't advertise it as combinations.
+        */
+       if (data->if_combination.max_interfaces > 1) {
+               hw->wiphy->iface_combinations = &data->if_combination;
+               hw->wiphy->n_iface_combinations = 1;
+       }
 
        if (param->ciphers) {
                memcpy(data->ciphers, param->ciphers,
index d54dda67d036c19cffce6bc30765c39dc93ee326..3af45949e868909e3073335cc302411c5e6c9761 100644 (file)
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
        bus_ops->rmw = mt7603_rmw;
        dev->mt76.bus = bus_ops;
 
+       spin_lock_init(&dev->ps_lock);
+
        INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
        tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
                     (unsigned long)dev);
index 5e31d7da96fc88e5fab246c61ec1d37a328a8700..5abc02b578185a6467571f549987dd147e2b3d3b 100644 (file)
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
                 MT_BA_CONTROL_1_RESET));
 }
 
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size)
 {
        u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
                mt76_clear(dev, addr + (15 * 4), tid_mask);
                return;
        }
-       mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-       mt7603_mac_stop(dev);
-       switch (tid) {
-       case 0:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
-               break;
-       case 1:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
-               break;
-       case 2:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
-                              ssn >> 8);
-               break;
-       case 3:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
-               break;
-       case 4:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
-               break;
-       case 5:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
-                              ssn >> 4);
-               break;
-       case 6:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
-               break;
-       case 7:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
-               break;
-       }
-       mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
-       mt7603_mac_start(dev);
 
        for (i = 7; i > 0; i--) {
                if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt7603_vif *mvif;
        int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        int tx_count = 8;
        u8 frame_type, frame_subtype;
        u16 fc = le16_to_cpu(hdr->frame_control);
+       u16 seqno = 0;
        u8 vif_idx = 0;
        u32 val;
        u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
                tx_count = 0x1f;
 
        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
-             FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+                 MT_TXD3_SN_VALID;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               seqno = le16_to_cpu(hdr->seq_ctrl);
+       else if (ieee80211_is_back_req(hdr->frame_control))
+               seqno = le16_to_cpu(bar->start_seq_num);
+       else
+               val &= ~MT_TXD3_SN_VALID;
+
+       val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
        txwi[3] = cpu_to_le32(val);
 
        if (key) {
index cc0fe0933b2d8043e622f1b513817b6528bbcaae..a3c4ef198bfeea965fb3f8d71e9d622cc546bb1a 100644 (file)
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
        struct sk_buff_head list;
 
-       mt76_stop_tx_queues(&dev->mt76, sta, false);
+       mt76_stop_tx_queues(&dev->mt76, sta, true);
        mt7603_wtbl_set_ps(dev, msta, ps);
        if (ps)
                return;
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                mtxq->aggr = true;
                mtxq->send_bar = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                mtxq->aggr = false;
                ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                break;
        case IEEE80211_AMPDU_TX_START:
                mtxq->agg_ssn = *ssn << 4;
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
                mtxq->aggr = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        }
index 79f3324294328b0f5b842a98ea23115b96470ca2..6049f3b7c8fec429de86329d35662c4659f711ee 100644 (file)
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
 int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size);
 
 void mt7603_pse_client_reset(struct mt7603_dev *dev);
index 9ed231abe91676119d751b06cfa995a7f5dd716c..4fe5a83ca5a41713d894a4210fe5ef0d68e47e17 100644 (file)
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                return;
 
        rcu_read_lock();
-       mt76_tx_status_lock(mdev, &list);
 
        if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
                wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                                          drv_priv);
        }
 
+       mt76_tx_status_lock(mdev, &list);
+
        if (wcid) {
                if (stat->pktid >= MT_PACKET_ID_FIRST)
                        status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                if (*update == 0 && stat_val == stat_cache &&
                    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
                        msta->n_frames++;
-                       goto out;
+                       mt76_tx_status_unlock(mdev, &list);
+                       rcu_read_unlock();
+                       return;
                }
 
                mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 
        if (status.skb)
                mt76_tx_status_skb_done(mdev, status.skb, &list);
-       else
-               ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
        mt76_tx_status_unlock(mdev, &list);
+
+       if (!status.skb)
+               ieee80211_tx_status_ext(mt76_hw(dev), &status);
        rcu_read_unlock();
 }
 
index 4b1744e9fb78a08c59fe0ac71d0d9962ae6761be..50b92ca92bd75c33d783ed9bfdf0f01f7d5ce0ae 100644 (file)
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
        CONFIG_CHANNEL_HT40,
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
-       CONFIG_QOS_DISABLED,
        CONFIG_MONITORING,
 
        /*
index 2825560e2424dbc766c5d5489491ff7dc67c5211..e8462f25d2522c4dbe95215b3de0279213cdc2b4 100644 (file)
@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                        rt2x00dev->intf_associated--;
 
                rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-               clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
        }
 
-       /*
-        * Check for access point which do not support 802.11e . We have to
-        * generate data frames sequence number in S/W for such AP, because
-        * of H/W bug.
-        */
-       if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-               set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
        /*
         * When the erp information has changed, we should perform
         * additional configuration steps. For all other changes we are done.
index 92ddc19e7bf747a23d0eb24c15b05ff111751754..4834b4eb0206408093a54d47b2a6a5831aa75674 100644 (file)
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
                /*
                 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-                * seqno on retransmited data (non-QOS) frames. To workaround
-                * the problem let's generate seqno in software if QOS is
-                * disabled.
+                * seqno on retransmitted data (non-QOS) and management frames.
+                * To workaround the problem let's generate seqno in software.
+                * Except for beacons which are transmitted periodically by H/W
+                * hence hardware has to assign seqno for them.
                 */
-               if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-                       __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-               else
+               if (ieee80211_is_beacon(hdr->frame_control)) {
+                       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                        /* H/W will generate sequence number */
                        return;
+               }
+
+               __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
        }
 
        /*
index 8a04c5e029998504d5c07c1c327746ecf324bf78..0f43bb3895669061a3007a2f27709f4441ab44d1 100644 (file)
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * MEI Library for mei bus nfc device access
- *
- * Copyright (C) 2013  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (c) 2013, Intel Corporation.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * MEI Library for mei bus nfc device access
  */
-
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
index eb5eddf1794e21598eb27f0bd93df5cc65f91842..5dad8847a9b35a7156bf6470e9679dcfc9458215 100644 (file)
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * HCI based Driver for Inside Secure microread NFC Chip
- *
- * Copyright (C) 2013  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for Inside Secure microread NFC Chip
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index ad57a8ec00d654224d2be29ed65669d938b5c0b4..579bc599f545b45c03a45bfb42b6674301895f42 100644 (file)
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * HCI based Driver for NXP pn544 NFC Chip
- *
  * Copyright (C) 2013  Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for NXP pn544 NFC Chip
  */
 
 #include <linux/module.h>
index b72a303176c70962e04f8304a816c78f812512c1..9486acc08402db3a17079c0ec2589ce445bb23d2 100644 (file)
@@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
 
        nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-       if (nd_btt->id < 0) {
-               kfree(nd_btt);
-               return NULL;
-       }
+       if (nd_btt->id < 0)
+               goto out_nd_btt;
 
        nd_btt->lbasize = lbasize;
-       if (uuid)
+       if (uuid) {
                uuid = kmemdup(uuid, 16, GFP_KERNEL);
+               if (!uuid)
+                       goto out_put_id;
+       }
        nd_btt->uuid = uuid;
        dev = &nd_btt->dev;
        dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
        }
        return dev;
+
+out_put_id:
+       ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+       kfree(nd_btt);
+       return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
index 7849bf1812c47e64f76e16c0ccf8f0ccc6f3bc25..f293556cbbf6d747004b132a23c440296ec760f7 100644 (file)
@@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        if (!nsblk->uuid)
                goto blk_err;
        memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-       if (name[0])
+       if (name[0]) {
                nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                GFP_KERNEL);
+               if (!nsblk->alt_name)
+                       goto blk_err;
+       }
        res = nsblk_add_resource(nd_region, ndd, nsblk,
                        __le64_to_cpu(nd_label->dpa));
        if (!res)
index bc2f700feef8abdad873197237f34f765055c22f..0279eb1da3ef5ae40c5ab80ef6940732dca03bf0 100644 (file)
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
 }
 
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
        return BLK_STS_OK;
 }
index f8bb746a549f7b993dcf61f052acde8303d11cae..a570f2263a424e96908c559750454a086a3df3e2 100644 (file)
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
 module_param(key_revalidate, bool, 0444);
 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static void *key_data(struct key *key)
 {
        struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
        return key;
 }
 
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+               struct key **key)
+{
+       *key = nvdimm_request_key(nvdimm);
+       if (!*key)
+               return zero_key;
+
+       return key_data(*key);
+}
+
 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
                key_serial_t id, int subclass)
 {
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        return key;
 }
 
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+               key_serial_t id, int subclass, struct key **key)
+{
+       *key = NULL;
+       if (id == 0) {
+               if (subclass == NVDIMM_BASE_KEY)
+                       return zero_key;
+               else
+                       return NULL;
+       }
+
+       *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+       if (!*key)
+               return NULL;
+
+       return key_data(*key);
+}
+
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
 {
        struct key *key;
        int rc;
+       const void *data;
 
        if (!nvdimm->sec.ops->change_key)
-               return NULL;
+               return -EOPNOTSUPP;
 
-       key = nvdimm_request_key(nvdimm);
-       if (!key)
-               return NULL;
+       data = nvdimm_get_key_payload(nvdimm, &key);
 
        /*
         * Send the same key to the hardware as new and old key to
         * verify that the key is good.
         */
-       rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
-                       key_data(key), NVDIMM_USER);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
        if (rc < 0) {
                nvdimm_put_key(key);
-               key = NULL;
+               return rc;
        }
-       return key;
+
+       nvdimm_put_key(key);
+       nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+       return 0;
 }
 
 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key = NULL;
+       struct key *key;
+       const void *data;
        int rc;
 
        /* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                if (!key_revalidate)
                        return 0;
 
-               key = nvdimm_key_revalidate(nvdimm);
-               if (!key)
-                       return nvdimm_security_freeze(nvdimm);
+               return nvdimm_key_revalidate(nvdimm);
        } else
-               key = nvdimm_request_key(nvdimm);
+               data = nvdimm_get_key_payload(nvdimm, &key);
 
-       if (!key)
-               return -ENOKEY;
-
-       rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->disable(nvdimm, data);
        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key, *newkey;
        int rc;
+       const void *data, *newdata;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
                return -EIO;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
-       if (!newkey) {
+       newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+                       NVDIMM_NEW_KEY, &newkey);
+       if (!newdata) {
                nvdimm_put_key(key);
                return -ENOKEY;
        }
 
-       rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
-                       key_data(newkey), pass_type);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
        dev_dbg(dev, "key: %d %d update%s: %s\n",
                        key_serial(key), key_serial(newkey),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                return -EOPNOTSUPP;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+       rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+       rc = nvdimm->sec.ops->overwrite(nvdimm, data);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
index 470601980794edd9ebd803199587c62f0586fb03..2c43e12b70afccfb424e62d5099339e5056173e9 100644 (file)
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
                                "Cancelling I/O %d", req->tag);
 
        nvme_req(req)->status = NVME_SC_ABORT_REQ;
-       blk_mq_complete_request(req);
+       blk_mq_complete_request_sync(req);
        return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
index f3b9d91ba0dfd30ba7c4c3f554e14ea860c389b7..6d8451356eaca468742ecf335ee20763d6f73876 100644 (file)
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
        memset(queue, 0, sizeof(*queue));
        queue->ctrl = ctrl;
        queue->qnum = idx;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
        queue->dev = ctrl->dev;
 
        if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        struct nvme_command *sqe = &cmdiu->sqe;
-       u32 csn;
        int ret, opstate;
 
        /*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
        /* format the FC-NVME CMD IU and fcp_req */
        cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-       csn = atomic_inc_return(&queue->csn);
-       cmdiu->csn = cpu_to_be32(csn);
        cmdiu->data_len = cpu_to_be32(data_len);
        switch (io_dir) {
        case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        if (!(op->flags & FCOP_FLAGS_AEN))
                blk_mq_start_request(op->rq);
 
+       cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
        ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
                                        queue->lldd_handle, &op->fcp_req);
 
        if (ret) {
+               /*
+                * If the lld fails to send the command is there an issue with
+                * the csn value?  If the command that fails is the Connect,
+                * no - as the connection won't be live.  If it is a command
+                * post-connect, it's possible a gap in csn may be created.
+                * Does this matter?  As Linux initiators don't send fused
+                * commands, no.  The gap would exist, but as there's nothing
+                * that depends on csn order to be delivered on the target
+                * side, it shouldn't hurt.  It would be difficult for a
+                * target to even detect the csn gap as it has no idea when the
+                * cmd with the csn was supposed to arrive.
+                */
                opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
                __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
index 76250181fee0555b2e576651ee67349ecb776392..9f72d515fc4b30a3785b396910660074ad076cf4 100644 (file)
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
        return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+       return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
index c872b47a88f31722b358e219c403f2c2cb765988..33ed95e72d6b19598f76df0c50f6fccfdaec37bd 100644 (file)
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
                memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvmet_subsys_link *p;
+       struct nvmet_port *r;
+       size_t entries = 0;
+
+       list_for_each_entry(p, &req->port->subsystems, entry) {
+               if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+                       continue;
+               entries++;
+       }
+       list_for_each_entry(r, &req->port->referrals, entry)
+               entries++;
+       return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
        const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmf_disc_rsp_page_hdr *hdr;
+       u64 offset = nvmet_get_log_page_offset(req->cmd);
        size_t data_len = nvmet_get_log_page_len(req->cmd);
-       size_t alloc_len = max(data_len, sizeof(*hdr));
-       int residual_len = data_len - sizeof(*hdr);
+       size_t alloc_len;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        u32 numrec = 0;
        u16 status = 0;
+       void *buffer;
+
+       /* Spec requires dword aligned offsets */
+       if (offset & 0x3) {
+               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+               goto out;
+       }
 
        /*
         * Make sure we're passing at least a buffer of response header size.
         * If host provided data len is less than the header size, only the
         * number of bytes requested by host will be sent to host.
         */
-       hdr = kzalloc(alloc_len, GFP_KERNEL);
-       if (!hdr) {
+       down_read(&nvmet_config_sem);
+       alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+       buffer = kzalloc(alloc_len, GFP_KERNEL);
+       if (!buffer) {
+               up_read(&nvmet_config_sem);
                status = NVME_SC_INTERNAL;
                goto out;
        }
 
-       down_read(&nvmet_config_sem);
+       hdr = buffer;
        list_for_each_entry(p, &req->port->subsystems, entry) {
+               char traddr[NVMF_TRADDR_SIZE];
+
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;
-               if (residual_len >= entry_size) {
-                       char traddr[NVMF_TRADDR_SIZE];
-
-                       nvmet_set_disc_traddr(req, req->port, traddr);
-                       nvmet_format_discovery_entry(hdr, req->port,
-                                       p->subsys->subsysnqn, traddr,
-                                       NVME_NQN_NVME, numrec);
-                       residual_len -= entry_size;
-               }
+
+               nvmet_set_disc_traddr(req, req->port, traddr);
+               nvmet_format_discovery_entry(hdr, req->port,
+                               p->subsys->subsysnqn, traddr,
+                               NVME_NQN_NVME, numrec);
                numrec++;
        }
 
        list_for_each_entry(r, &req->port->referrals, entry) {
-               if (residual_len >= entry_size) {
-                       nvmet_format_discovery_entry(hdr, r,
-                                       NVME_DISC_SUBSYS_NAME,
-                                       r->disc_addr.traddr,
-                                       NVME_NQN_DISC, numrec);
-                       residual_len -= entry_size;
-               }
+               nvmet_format_discovery_entry(hdr, r,
+                               NVME_DISC_SUBSYS_NAME,
+                               r->disc_addr.traddr,
+                               NVME_NQN_DISC, numrec);
                numrec++;
        }
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
        up_read(&nvmet_config_sem);
 
-       status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-       kfree(hdr);
+       status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+       kfree(buffer);
 out:
        nvmet_req_complete(req, status);
 }
index 51e49efd7849df640b5e7cb9fa9715ada7d373e4..1653d19b187fd5de826875cdcf5675c8fcb4431c 100644 (file)
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
index 1be571c20062c53341e141791b7137bae129ec6e..6bad04cbb1d37b8e9a6227e8d05eca6ce8545642 100644 (file)
 #define DBG_IRT(x...)
 #endif
 
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa)   ((irte)->dest_iosapic_addr == (hpa))
+#else
 #define COMPARE_IRTE_ADDR(irte, hpa)   \
-               ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+               ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
 
 #define IOSAPIC_REG_SELECT              0x00
 #define IOSAPIC_REG_WINDOW              0x10
index e9b52e4a4648f7132185eb4258a31b2d98b08e08..e77044c2bf622db01d798062f28d03221238df42 100644 (file)
@@ -158,8 +158,9 @@ static int parport_config(struct pcmcia_device *link)
     return 0;
 
 failed:
-    parport_cs_release(link);
-    return -ENODEV;
+       parport_cs_release(link);
+       kfree(link->priv);
+       return -ENODEV;
 } /* parport_config */
 
 static void parport_cs_release(struct pcmcia_device *link)
index 3f3df4c29f6e1d40112343e91f902735a6b4d535..905282a8ddaaeda2f8df06570bfb3716e2b2479c 100644 (file)
@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
                 * removed from the slot/adapter.
                 */
                msleep(1000);
+
+               /* Ignore link or presence changes caused by power off */
+               atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+                          &ctrl->pending_events);
        }
 
        /* turn off Green LED */
index a59ad09ce911d564c074930ea22968fcfab928e7..a077f67fe1dac17508d09e954cf4e5acead355d6 100644 (file)
@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
                         quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+                        quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
                         quirk_dma_func1_alias);
index 8f018b3f3cd4c42ef40764d39587e7e2e11285d1..c7039f52ad51802afa773525af8eed45e5438ac1 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+       {
+               .ident = "MPL CEC1x",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+               },
+       },
+       { /*sentinel*/ }
+};
+
 static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
                          const struct pmc_data *pmc_data)
 {
        struct platform_device *clkdev;
        struct pmc_clk_data *clk_data;
+       const struct dmi_system_id *d = dmi_first_match(critclk_systems);
 
        clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
        if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
 
        clk_data->base = pmc_regmap; /* offset is added by client */
        clk_data->clks = pmc_data->clks;
+       if (d) {
+               clk_data->critical = true;
+               pr_info("%s critclks quirk enabled\n", d->ident);
+       }
 
        clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
                                               PLATFORM_DEVID_NONE,
index ad969d9fc9815a173385588e034a1c650ba6c868..c2644a9fe80f1f1432e6e62ce6cbb7d8fbf0986b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Power supply driver for the goldfish emulator
  *
index 91751617b37af33b6241be4d13647775d5444881..c53a2185a0393c689c631d7df7f1b760cb2b72e0 100644 (file)
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
        arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
        arb->rstc.ops = &meson_audio_arb_rstc_ops;
        arb->rstc.of_node = dev->of_node;
+       arb->rstc.owner = THIS_MODULE;
 
        /*
         * Enable general :
index a71734c416939354129253af00090975ff0f1b5e..f933c06bff4f804a3e77408d51fe15606b62e135 100644 (file)
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
          will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-    tristate "ZXW Crystal SD3078"
+    tristate "ZXW Shenzhen whwave SD3078"
     help
-      If you say yes here you get support for the ZXW Crystal
+      If you say yes here you get support for the ZXW Shenzhen whwave
       SD3078 RTC chips.
 
       This driver can also be built as a module. If so, the module
index e5444296075ee147e74c93d35d3eb98e6a2b0c48..4d6bf9304ceb35932dfadbc921b2e658e5e3d2ec 100644 (file)
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
index b4e054c64bad9e54d23adb3a47da1008224906b3..69b54e5556c06234c5339431f3149bc923ebcf49 100644 (file)
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
        da9063_data_to_tm(data, &rtc->alarm_time, rtc);
        rtc->rtc_sync = false;
 
+       /*
+        * TODO: some models have alarms on a minute boundary but still support
+        * real hardware interrupts. Add this once the core supports it.
+        */
+       if (config->rtc_data_start != RTC_SEC)
+               rtc->rtc_dev->uie_unsupported = 1;
+
        irq_alarm = platform_get_irq_byname(pdev, "ALARM");
        ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
                                        da9063_alarm_event,
index d417b203cbc553eb25ab0cbf0eb493bcd84c9d46..1d3de2a3d1a4d7a0ad5a7d327efaaca0d72f468c 100644 (file)
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
        unsigned int byte;
-       int value = 0xff;       /* return 0xff for ignored values */
+       int value = -1;                 /* return -1 for ignored values */
 
        byte = readb(rtc->regbase + reg_off);
        if (byte & AR_ENB) {
index 6e294b4d3635fe399586f05045297646d9c8c574..f89f9d02e7884f321f858f18a020e122d83c8a03 100644 (file)
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
        blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
 
 raw:
-       block->blocks = (private->real_cyl *
+       block->blocks = ((unsigned long) private->real_cyl *
                          private->rdc_data.trk_per_cyl *
                          blk_per_trk);
 
        dev_info(&device->cdev->dev,
-                "DASD with %d KB/block, %d KB total size, %d KB/track, "
+                "DASD with %u KB/block, %lu KB total size, %u KB/track, "
                 "%s\n", (block->bp_block >> 10),
-                ((private->real_cyl *
+                (((unsigned long) private->real_cyl *
                   private->rdc_data.trk_per_cyl *
                   blk_per_trk * (block->bp_block >> 9)) >> 1),
                 ((blk_per_trk * block->bp_block) >> 10),
index fd2146bcc0add9aae3b71ba4cc88b788b7702591..e17364e13d2f71ec289a47f6a79f7c56ae85b264 100644 (file)
@@ -629,7 +629,7 @@ con3270_init(void)
                     (void (*)(unsigned long)) con3270_read_tasklet,
                     (unsigned long) condev->read);
 
-       raw3270_add_view(&condev->view, &con3270_fn, 1);
+       raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
 
        INIT_LIST_HEAD(&condev->freemem);
        for (i = 0; i < CON3270_STRING_PAGES; i++) {
index 8f3a2eeb28dca0b579d2d773057296e92f379342..8b48ba9c598ecedcac5ca78c86f97d3587e71c7d 100644 (file)
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 
        init_waitqueue_head(&fp->wait);
        fp->fs_pid = get_pid(task_pid(current));
-       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                fs3270_free_view(&fp->view);
                goto out;
index f8cd2935fbfd48c5aef1ad980457cc55433b6db4..63a41b16876102a8f1210396f1970d0d5e77df18 100644 (file)
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
  * Add view to device with minor "minor".
  */
 int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
 {
        unsigned long flags;
        struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
                view->cols = rp->cols;
                view->ascebc = rp->ascebc;
                spin_lock_init(&view->lock);
+               lockdep_set_subclass(&view->lock, subclass);
                list_add(&view->list, &rp->view_list);
                rc = 0;
                spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
index 114ca7cbf8897dce734e59cb283923e2c160b3bf..3afaa35f73513cba47566e9601b775339e6cdf78 100644 (file)
@@ -150,6 +150,8 @@ struct raw3270_fn {
 struct raw3270_view {
        struct list_head list;
        spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ  0
+#define RAW3270_VIEW_LOCK_BH   1
        atomic_t ref_count;
        struct raw3270 *dev;
        struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
        unsigned char *ascebc;          /* ascii -> ebcdic table */
 };
 
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
 int raw3270_activate_view(struct raw3270_view *);
 void raw3270_del_view(struct raw3270_view *);
 void raw3270_deactivate_view(struct raw3270_view *);
index 2b0c36c2c5688ebf6ef0266d66cad52793b7ae1b..98d7fc152e32f85e8e53e1e56b26244753c67a00 100644 (file)
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                return PTR_ERR(tp);
 
        rc = raw3270_add_view(&tp->view, &tty3270_fn,
-                             tty->index + RAW3270_FIRSTMINOR);
+                             tty->index + RAW3270_FIRSTMINOR,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                tty3270_free_view(tp);
                return rc;
index 6a340f2c355693170776992c6a1d018e78d6ee96..5ea83dc4f1d740e9db1288ed9d8f70423312d5ba 100644 (file)
@@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
        __ap_flush_queue(aq);
        /* set REMOVE state to prevent new messages are queued in */
        aq->state = AP_STATE_REMOVE;
-       del_timer_sync(&aq->timeout);
        spin_unlock_bh(&aq->lock);
+       del_timer_sync(&aq->timeout);
 }
 
 void ap_queue_remove(struct ap_queue *aq)
index 3e85d665c572957aa491917b1433b5254812b0f6..45eb0c14b8807d17c228ef563506e82b6a50d533 100644 (file)
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
 
 static void __init pkey_debug_init(void)
 {
-       debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+       /* 5 arguments per dbf entry (including the format string ptr) */
+       debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
        debug_register_view(debug_info, &debug_sprintf_view);
        debug_set_level(debug_info, 3);
 }
index 3d401d02c01955bc02304fe4f761993e73eaad46..bdd177e3d76229bae1fb67003a9045a3f3e26c87 100644 (file)
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
        ahc = ahc_alloc(&aic7xxx_driver_template, name);
        if (ahc == NULL)
                return (ENOMEM);
+       ahc->dev = dev;
        error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
                               eisaBase);
        if (error != 0) {
index 5614921b4041acf4a10c004646d93fa1aa0ebff6..88b90f9806c99d04cd07fa632e243e5b5528d651 100644 (file)
@@ -943,6 +943,7 @@ struct ahc_softc {
         * Platform specific device information.
         */
        ahc_dev_softc_t           dev_softc;
+       struct device             *dev;
 
        /*
         * Bus specific device information.
index 3c9c17450bb399b0a9885270c2070bfb7fa6b24c..d5c4a0d2370620afe5a0fe3ad39bd44025c14429 100644 (file)
@@ -860,8 +860,8 @@ int
 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
                 int flags, bus_dmamap_t *mapp)
 {
-       *vaddr = pci_alloc_consistent(ahc->dev_softc,
-                                     dmat->maxsize, mapp);
+       /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+       *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
        if (*vaddr == NULL)
                return ENOMEM;
        return 0;
@@ -871,8 +871,7 @@ void
 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
                void* vaddr, bus_dmamap_t map)
 {
-       pci_free_consistent(ahc->dev_softc, dmat->maxsize,
-                           vaddr, map);
+       dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
@@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
 
        host->transportt = ahc_linux_transport_template;
 
-       retval = scsi_add_host(host,
-                       (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+       retval = scsi_add_host(host, ahc->dev);
        if (retval) {
                printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
                scsi_host_put(host);
index 0fc14dac7070ce6bab629c08a2998638fc26553e..717d8d1082ce18ae9899870e43238c1443fdb36f 100644 (file)
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
        ahc->dev_softc = pci;
+       ahc->dev = &pci->dev;
        error = ahc_pci_config(ahc, entry);
        if (error != 0) {
                ahc_free(ahc);
index 462560b2855e25e1204064c89350fed5653bf36a..469d0bc9f5fe4db6e756e4270bf522e63cd15566 100644 (file)
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
        }
 
 out:
-       if (req->nsge > 0)
+       if (req->nsge > 0) {
                scsi_dma_unmap(cmnd);
+               if (req->dcopy && (host_status == DID_OK))
+                       host_status = csio_scsi_copy_to_sgl(hw, req);
+       }
 
        cmnd->result = (((host_status) << 16) | scsi_status);
        cmnd->scsi_done(cmnd);
index dfba4921b265a0fa7fdf2409295483a68c5e181c..5bf61431434be73a381fc9d59b6fee3b9441cab7 100644 (file)
@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
-               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index c98f264f1d83a030ea8a00678fd586ab70059218..a497b2c0cb798e07d240affc72b4621fa1503f34 100644 (file)
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         * wake up the thread.
         */
        spin_lock(&lpfc_cmd->buf_lock);
-       if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
-               lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
-               if (lpfc_cmd->waitq)
-                       wake_up(lpfc_cmd->waitq);
+       lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+       if (lpfc_cmd->waitq) {
+               wake_up(lpfc_cmd->waitq);
                lpfc_cmd->waitq = NULL;
        }
        spin_unlock(&lpfc_cmd->buf_lock);
index e74a62448ba466a58c2365546b5d1fc34bafa8e9..e5db9a9954dc0cd015577686c8bdb5efda9f78de 100644 (file)
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       struct qedi_nvm_iscsi_image nvm_image;
-
        qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
-                                              sizeof(nvm_image),
+                                              sizeof(struct qedi_nvm_iscsi_image),
                                               &qedi->nvm_buf_dma, GFP_KERNEL);
        if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
                                              (char *)qedi->iscsi_image,
-                                             sizeof(nvm_image));
+                                             sizeof(struct qedi_nvm_iscsi_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
index c4cbfd07b9167f0e29b635b9b24e65a6df3826d9..a08ff3bd63105141840e774fc0af3081aa78178a 100644 (file)
@@ -238,6 +238,7 @@ static struct {
        {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 5a58cbf3a75da9123899ce668934e002933d1416..c14006ac98f91c6bb3c7d360b7bd78720d19df84 100644 (file)
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
        {"NETAPP", "INF-01-00",         "rdac", },
        {"LSI", "INF-01-00",            "rdac", },
        {"ENGENIO", "INF-01-00",        "rdac", },
+       {"LENOVO", "DE_Series",         "rdac", },
        {NULL, NULL,                    NULL },
 };
 
index 601b9f1de26758a1d078a69de36469ad4318d39d..07dfc17d48246551a63966444172b4197e30def4 100644 (file)
@@ -1706,8 +1706,12 @@ out_put_budget:
                        ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
+               if (unlikely(!scsi_device_online(sdev)))
+                       scsi_req(req)->result = DID_NO_CONNECT << 16;
+               else
+                       scsi_req(req)->result = DID_ERROR << 16;
                /*
-                * Make sure to release all allocated ressources when
+                * Make sure to release all allocated resources when
                 * we hit an error, as we will never see this command
                 * again.
                 */
index 84380bae20f1ec350d5209931cdca018351199f1..8472de1007fffca12f41823e3ed8e45dac1ee06d 100644 (file)
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
        struct device *dev = &device->device;
        struct storvsc_device *stor_device;
-       int num_cpus = num_online_cpus();
        int num_sc;
        struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;
 
-       num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+       /*
+        * If the number of CPUs is artificially restricted, such as
+        * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+        * sub-channels >= the number of CPUs. These sub-channels
+        * should not be created. The primary channel is already created
+        * and assigned to one CPU, so check against # CPUs - 1.
+        */
+       num_sc = min((int)(num_online_cpus() - 1), max_chns);
+       if (!num_sc)
+               return;
+
        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return;
index 8af01777d09c74f344ad325256dbd30248febe32..f8cb7c23305b7e984e16ba94406f1f338cb17a46 100644 (file)
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+       num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
        num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
index 808ed92ed66fe4bedfbbba500452d86771e8162e..1bb1cb6513491b805075456e41c45a253e77c83a 100644 (file)
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (!devpriv)
                return -ENOMEM;
 
+       mutex_init(&devpriv->mut);
+       usb_set_intfdata(intf, devpriv);
+
        ret = ni6501_find_endpoints(dev);
        if (ret)
                return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       mutex_init(&devpriv->mut);
-       usb_set_intfdata(intf, devpriv);
-
        ret = comedi_alloc_subdevices(dev, 2);
        if (ret)
                return ret;
index 6234b649d887ccb3abac4c73dcb38aa095768600..65dc6c51037e30edf30b1ad7e0d6eea7c0390e86 100644 (file)
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 
        devpriv->model = board->model;
 
+       sema_init(&devpriv->limit_sem, 8);
+
        ret = vmk80xx_find_usb_endpoints(dev);
        if (ret)
                return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       sema_init(&devpriv->limit_sem, 8);
-
        usb_set_intfdata(intf, devpriv);
 
        if (devpriv->model == VMK8055_MODEL)
index 526e0dbea5b5714618b463cb3eab98b0895e99f6..81af768e7248e514699541552e2eb2cd99e1bc5e 100644 (file)
@@ -298,7 +298,7 @@ submit_bio_retry:
        *last_block = current_block;
 
        /* shift in advance in case of it followed by too many gaps */
-       if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+       if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
                /* err should reassign to 0 after submitting */
                err = 0;
                goto submit_bio_out;
index acdbc07fd2592c03084a0c6fb6e89aee073f58fd..2fc8bc22b57baa39a3d4a8cd56ae0f3d2d0a0af2 100644 (file)
 #define AD7192_CH_AIN3         BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4         BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M  0x000  /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M  0x001  /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M  0x002  /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M  0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M  0x001  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M  0x002  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M  0x004  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M  0x008  /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP         0x100 /* Temp senseor */
 #define AD7193_CH_AIN2P_AIN2M  0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1         0x401 /* AIN1 - AINCOM */
index 029c3bf42d4d942f2e58c81cfb03292fc8eae0d3..07774c000c5a68db9f7f6c1e93eae1840fac23ec 100644 (file)
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
 static IIO_DEV_ATTR_IPEAK(0644,
                ade7854_read_32bit,
                ade7854_write_32bit,
-               ADE7854_VPEAK);
+               ADE7854_IPEAK);
 static IIO_DEV_ATTR_APHCAL(0644,
                ade7854_read_16bit,
                ade7854_write_16bit,
index 18936cdb10830ae4506435377a3342bb0c2e076e..956daf8c3bd24f9b1ccce2a254c26ea5e75e7ba9 100644 (file)
@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface)
 
        INIT_LIST_HEAD(&iface->p->channel_list);
        iface->p->dev_id = id;
-       snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+       strcpy(iface->p->name, iface->description);
        iface->dev.init_name = iface->p->name;
        iface->dev.bus = &mc.bus;
        iface->dev.parent = &mc.dev;
index 09a183dfc52640027bf571184ee4e69e819c5951..a31db15cd7c0d36bf2e4dee32d7b1201bc2674c5 100644 (file)
@@ -1520,11 +1520,13 @@ static int __init sc16is7xx_init(void)
 #endif
        return ret;
 
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 err_spi:
+#endif
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
        i2c_del_driver(&sc16is7xx_i2c_uart_driver);
-#endif
 err_i2c:
+#endif
        uart_unregister_driver(&sc16is7xx_uart);
        return ret;
 }
index 2d1c626312cd8892d5eae0fa65e03d3347a09e81..3cd139752d3f70f9dfce1fe2c43f3eab03cf433a 100644 (file)
@@ -2512,14 +2512,16 @@ done:
                         * center of the last stop bit in sampling clocks.
                         */
                        int last_stop = bits * 2 - 1;
-                       int deviation = min_err * srr * last_stop / 2 / baud;
+                       int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+                                                         (int)(srr + 1),
+                                                         2 * (int)baud);
 
                        if (abs(deviation) >= 2) {
                                /* At least two sampling clocks off at the
                                 * last stop bit; we can increase the error
                                 * margin by shifting the sampling point.
                                 */
-                               int shift = min(-8, max(7, deviation / 2));
+                               int shift = clamp(deviation / 2, -8, 7);
 
                                hssrr |= (shift << HSCIF_SRHP_SHIFT) &
                                         HSCIF_SRHP_MASK;
index d34984aa646dc4d30813fdfb91290fbef958d0fb..650c66886c80f5d1c9770321949251af17e112a6 100644 (file)
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
                        return;
        }
        scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-       update_region(vc, (unsigned long) start, count);
+       if (con_should_update(vc))
+               do_update_region(vc, (unsigned long) start, count);
        vc->vc_need_wrap = 0;
 }
 
index 0ee3cd3c25ee28fdfec614d7f2f563c64f7b5873..450e2f5c9b4329ae221030343adca7c4eafef109 100644 (file)
@@ -68,8 +68,8 @@ static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
 static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count);
 
-DEVICE_ATTR(reg_br, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
-DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
+static DEVICE_ATTR(reg_br, 0664, reg_show, reg_store);
+static DEVICE_ATTR(reg_or, 0664, reg_show, reg_store);
 
 static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
index a25659b5a5d17d97272589a2dd1ac7979ed55070..3fa20e95a6bb6446fb2c4aa3d71abf75b611ce33 100644 (file)
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
                rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
                                   subvendor, subdevice, class, class_mask, 0);
                if (rc)
-                       pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+                       pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask, rc);
                else
-                       pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+                       pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask);
        }
index 8dbb270998f47121dc0886151cb6e47da8a8e211..6b64e45a52691ffd9dd4809d0e127fc8c450cf80 100644 (file)
@@ -1398,7 +1398,7 @@ unlock_exit:
        mutex_unlock(&container->lock);
 }
 
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
index 73652e21efec6a28393bd979d2d42caef711b280..d0f731c9920a65a44d614181ecf3a4e4c2d90755 100644 (file)
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+                "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
        struct list_head        domain_list;
        struct vfio_domain      *external_domain; /* domain for external user */
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
+       unsigned int            dma_avail;
        bool                    v2;
        bool                    nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        kfree(dma);
+       iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                goto out_unlock;
        }
 
+       if (!iommu->dma_avail) {
+               ret = -ENOSPC;
+               goto out_unlock;
+       }
+
        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                ret = -ENOMEM;
                goto out_unlock;
        }
 
+       iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
        INIT_LIST_HEAD(&iommu->domain_list);
        iommu->dma_list = RB_ROOT;
+       iommu->dma_avail = dma_entry_limit;
        mutex_init(&iommu->lock);
        BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
index 5ace833de74620bf1a089186057d766e7e3def63..351af88231ada1145bfb72326f905bfaac3819ca 100644 (file)
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
 {
-       struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+       struct vhost_umem_node *tmp, *node;
 
+       if (!size)
+               return -EFAULT;
+
+       node = kmalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return -ENOMEM;
 
index d0584c040c60f3a8f1a8b48004ec66074eebdcc8..7a0398bb84f77e520aeb8113c2ac77ef7ef6e0f2 100644 (file)
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-       for (i = 0; i < vp_dev->msix_vectors; i++)
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       if (vp_dev->msix_affinity_masks) {
+               for (i = 0; i < vp_dev->msix_vectors; i++)
+                       if (vp_dev->msix_affinity_masks[i])
+                               free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       }
 
        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
index 18846afb39da189e3f0dd0168727e0cf2865549e..5df92c308286dc0f5afe203cf279adb3d8af8185 100644 (file)
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
                                          GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
                if (queue)
                        break;
+               if (!may_reduce_num)
+                       return NULL;
        }
 
        if (!num)
index 8b5e598ffdb3c8489da413d52c2b3bd07ca92bbe..8f2b25f1614c8d63bc035e14a6ce3e22d2d10745 100644 (file)
@@ -37,6 +37,11 @@ module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
 MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
                                "0-disable, 1-enable (default)");
 
+/* extra configurations - e.g. 1WS */
+static int extra_config;
+module_param(extra_config, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
+
 /**
  * The DS2482 registers - there are 3 registers that are addressed by a read
  * pointer. The read pointer is set by the last command executed.
@@ -70,8 +75,6 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
 #define DS2482_REG_CFG_PPM             0x02    /* presence pulse masking */
 #define DS2482_REG_CFG_APU             0x01    /* active pull-up */
 
-/* extra configurations - e.g. 1WS */
-static int extra_config;
 
 /**
  * Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
@@ -130,6 +133,8 @@ struct ds2482_data {
  */
 static inline u8 ds2482_calculate_config(u8 conf)
 {
+       conf |= extra_config;
+
        if (ds2482_active_pullup)
                conf |= DS2482_REG_CFG_APU;
 
@@ -405,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
                /* If the chip did reset since detect, re-config it */
                if (err & DS2482_REG_STS_RST)
                        ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
-                                       ds2482_calculate_config(extra_config));
+                                            ds2482_calculate_config(0x00));
        }
 
        mutex_unlock(&pdev->access_lock);
@@ -431,7 +436,8 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
                ds2482_wait_1wire_idle(pdev);
                /* note: it seems like both SPU and APU have to be set! */
                retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
-                       ds2482_calculate_config(extra_config|DS2482_REG_CFG_SPU|DS2482_REG_CFG_APU));
+                       ds2482_calculate_config(DS2482_REG_CFG_SPU |
+                                               DS2482_REG_CFG_APU));
                ds2482_wait_1wire_idle(pdev);
        }
 
@@ -484,7 +490,7 @@ static int ds2482_probe(struct i2c_client *client,
 
        /* Set all config items to 0 (off) */
        ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
-               ds2482_calculate_config(extra_config));
+               ds2482_calculate_config(0x00));
 
        mutex_init(&data->access_lock);
 
@@ -559,7 +565,5 @@ module_i2c_driver(ds2482_driver);
 
 MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
 MODULE_DESCRIPTION("DS2482 driver");
-module_param(extra_config, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
 
 MODULE_LICENSE("GPL");
index de01a6d0059dc4adcb98a24197750f72b0b4ceaf..a1c61e351d3f7ee5cb8e82ba4a391559e6a8aef2 100644 (file)
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-                          GFP_KERNEL);
+       vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
        if (!vma_priv)
                return -ENOMEM;
 
index c3e201025ef015b49703cf311e71f1d1f041ec6e..0782ff3c227352e7b92b595960cc62aee5c2c4ce 100644 (file)
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        if (xen_store_evtchn == 0)
                return -ENOENT;
 
-       nonseekable_open(inode, filp);
-
-       filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+       stream_open(inode, filp);
 
        u = kzalloc(sizeof(*u), GFP_KERNEL);
        if (u == NULL)
index 1c7955f5cdaf2e776026390f615806f3e6ce535c..128f2dbe256a4eb0f6124294f883b29d8a57e10e 100644 (file)
@@ -203,8 +203,7 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
  */
 void afs_init_callback_state(struct afs_server *server)
 {
-       if (!test_and_clear_bit(AFS_SERVER_FL_NEW, &server->flags))
-               server->cb_s_break++;
+       server->cb_s_break++;
 }
 
 /*
index 8ee5972893ed5a75583bfb2821a42636403ee086..2f8acb4c556d28c77ec6d8c130eaaf3916a38403 100644 (file)
@@ -34,7 +34,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 static int afs_deliver_yfs_cb_callback(struct afs_call *);
 
 #define CM_NAME(name) \
-       const char afs_SRXCB##name##_name[] __tracepoint_string =       \
+       char afs_SRXCB##name##_name[] __tracepoint_string =     \
                "CB." #name
 
 /*
index 1a4ce07fb406da8e3a4e0d12c6fda605636ccb47..9cedc3fc1b7744679010f4aae412c92925cd3b3a 100644 (file)
@@ -216,9 +216,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        set_nlink(inode, 2);
        inode->i_uid            = GLOBAL_ROOT_UID;
        inode->i_gid            = GLOBAL_ROOT_GID;
-       inode->i_ctime.tv_sec   = get_seconds();
-       inode->i_ctime.tv_nsec  = 0;
-       inode->i_atime          = inode->i_mtime = inode->i_ctime;
+       inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
        inode->i_blocks         = 0;
        inode_set_iversion_raw(inode, 0);
        inode->i_generation     = 0;
index bb1f244b2b3ac2ff4a8428a8f72132a6ed230e01..3904ab0b95632af35c4db8fcad36ee6e1f277b47 100644 (file)
@@ -474,7 +474,6 @@ struct afs_server {
        time64_t                put_time;       /* Time at which last put */
        time64_t                update_at;      /* Time at which to next update the record */
        unsigned long           flags;
-#define AFS_SERVER_FL_NEW      0               /* New server, don't inc cb_s_break */
 #define AFS_SERVER_FL_NOT_READY        1               /* The record is not ready for use */
 #define AFS_SERVER_FL_NOT_FOUND        2               /* VL server says no such server */
 #define AFS_SERVER_FL_VL_FAIL  3               /* Failed to access VL server */
@@ -827,7 +826,7 @@ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest
 
 static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
 {
-       return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
+       return vnode->cb_break + vnode->cb_v_break;
 }
 
 static inline bool afs_cb_is_broken(unsigned int cb_break,
@@ -835,7 +834,6 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
                                    const struct afs_cb_interest *cbi)
 {
        return !cbi || cb_break != (vnode->cb_break +
-                                   cbi->server->cb_s_break +
                                    vnode->volume->cb_v_break);
 }
 
index 2c588f9bbbda226ec64fa0670e9c92c700f259e6..15c7e82d80cb30c0358db68f416b8d88b06dbc7c 100644 (file)
@@ -572,13 +572,17 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
-               default:
                        abort_code = RXGEN_CC_UNMARSHAL;
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                                abort_code, ret, "KUM");
                        goto local_abort;
+               default:
+                       abort_code = RX_USER_ABORT;
+                       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                               abort_code, ret, "KER");
+                       goto local_abort;
                }
        }
 
@@ -610,6 +614,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        bool stalled = false;
        u64 rtt;
        u32 life, last_life;
+       bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
 
@@ -621,7 +626,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                rtt2 = 2;
 
        timeout = rtt2;
-       last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
 
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
@@ -639,7 +644,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+                       /* rxrpc terminated the call. */
+                       rxrpc_complete = true;
+                       break;
+               }
+
                if (timeout == 0 &&
                    life == last_life && signal_pending(current)) {
                        if (stalled)
@@ -663,12 +673,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* Kill off the call if it's still live. */
        if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
-               _debug("call interrupted");
-               if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                           RX_USER_ABORT, -EINTR, "KWI"))
-                       afs_set_call_complete(call, -EINTR, 0);
+               if (rxrpc_complete) {
+                       afs_set_call_complete(call, call->error, call->abort_code);
+               } else {
+                       /* Kill off the call if it's still live. */
+                       _debug("call interrupted");
+                       if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                               afs_set_call_complete(call, -EINTR, 0);
+               }
        }
 
        spin_lock_bh(&call->state_lock);
index 642afa2e9783c4f95284980dd8054610fa4d49cf..65b33b6da48b9411c8385a27869785d5076713b1 100644 (file)
@@ -226,7 +226,6 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
        RCU_INIT_POINTER(server->addresses, alist);
        server->addr_version = alist->version;
        server->uuid = *uuid;
-       server->flags = (1UL << AFS_SERVER_FL_NEW);
        server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
        rwlock_init(&server->fs_lock);
        INIT_HLIST_HEAD(&server->cb_volumes);
index 72efcfcf9f95efd2b5cae1257a8d01247367ebeb..0122d7445fba1e07eaf62b4be1d1e69c66e5f7c4 100644 (file)
@@ -264,6 +264,7 @@ static void afs_kill_pages(struct address_space *mapping,
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
+                       unlock_page(page);
                }
 
                __pagevec_release(&pv);
index 38b741aef0bf5a93513f1ad1f8ab61b9de7c8078..3490d1fa0e16f4f1f189727661e18696ab3a7a08 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    woken;
+       bool                    done;
        bool                    cancelled;
        struct wait_queue_entry wait;
        struct work_struct      work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;
 
-       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
-       __u64                   ki_user_data;   /* user's data for completion */
+       struct io_event         ki_res;
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        if (unlikely(!req))
                return NULL;
 
+       if (unlikely(!get_reqs_available(ctx))) {
+               kmem_cache_free(kiocb_cachep, req);
+               return NULL;
+       }
+
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,30 +1074,20 @@ out:
        return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                          long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-       ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-       ev->data = iocb->ki_user_data;
-       ev->res = res;
-       ev->res2 = res2;
+       if (iocb->ki_eventfd)
+               eventfd_ctx_put(iocb->ki_eventfd);
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       aio_fill_event(event, iocb, res, res2);
+       *event = iocb->ki_res;
 
        kunmap_atomic(ev_page);
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-       pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                res, res2);
+       pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                (void __user *)(unsigned long)iocb->ki_res.obj,
+                iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
        /* after flagging the request as done, we
         * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
-       if (iocb->ki_eventfd) {
+       if (iocb->ki_eventfd)
                eventfd_signal(iocb->ki_eventfd, 1);
-               eventfd_ctx_put(iocb->ki_eventfd);
-       }
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
-       iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+       if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+               aio_complete(iocb);
+               iocb_destroy(iocb);
+       }
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                file_end_write(kiocb->ki_filp);
        }
 
-       aio_complete(iocb, res, res2);
+       iocb->ki_res.res = res;
+       iocb->ki_res.res2 = res2;
+       iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
        }
 }
 
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
                        bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
        return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
                         bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-       struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-       int ret;
+       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-       ret = vfs_fsync(req->file, req->datasync);
-       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+       iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+       iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
        return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&iocb->ki_list);
+       iocb->ki_res.res = mangle_poll(mask);
+       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
-       aio_poll_complete(iocb, mask);
+       iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        __poll_t mask = key_to_poll(key);
        unsigned long flags;
 
-       req->woken = true;
-
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               if (!(mask & req->events))
-                       return 0;
+       if (mask && !(mask & req->events))
+               return 0;
+
+       list_del_init(&req->wait.entry);
 
+       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
                 * call this function with IRQs disabled and because IRQs
                 * have to be disabled before ctx_lock is obtained.
                 */
-               if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                       list_del(&iocb->ki_list);
-                       spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                       list_del_init(&req->wait.entry);
-                       aio_poll_complete(iocb, mask);
-                       return 1;
-               }
+               list_del(&iocb->ki_list);
+               iocb->ki_res.res = mangle_poll(mask);
+               req->done = true;
+               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+               iocb_put(iocb);
+       } else {
+               schedule_work(&req->work);
        }
-
-       list_del_init(&req->wait.entry);
-       schedule_work(&req->work);
        return 1;
 }
 
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;
+       bool cancel = false;
        __poll_t mask;
 
        /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->woken = false;
+       req->done = false;
        req->cancelled = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
-       if (unlikely(!req->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
-
        spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       if (req->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(req->head)) {
+               spin_lock(&req->head->lock);
+               if (unlikely(list_empty(&req->wait.entry))) {
+                       if (apt.error)
+                               cancel = true;
+                       apt.error = 0;
+                       mask = 0;
+               }
+               if (mask || apt.error) {
+                       list_del_init(&req->wait.entry);
+               } else if (cancel) {
+                       WRITE_ONCE(req->cancelled, true);
+               } else if (!req->done) { /* actually waiting for an event */
+                       list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                       aiocb->ki_cancel = aio_poll_cancel;
+               }
+               spin_unlock(&req->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;
-       } else if (mask || apt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&req->wait.entry));
-               list_del_init(&req->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-               aiocb->ki_cancel = aio_poll_cancel;
        }
-       spin_unlock(&req->head->lock);
        spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-       if (unlikely(apt.error))
-               return apt.error;
-
        if (mask)
-               aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
-       return 0;
+               iocb_put(aiocb);
+       return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-                          struct iocb __user *user_iocb, bool compat)
+                          struct iocb __user *user_iocb, struct aio_kiocb *req,
+                          bool compat)
 {
-       struct aio_kiocb *req;
-       ssize_t ret;
-
-       /* enforce forwards compatibility on users */
-       if (unlikely(iocb->aio_reserved2)) {
-               pr_debug("EINVAL: reserve field set\n");
-               return -EINVAL;
-       }
-
-       /* prevent overflows */
-       if (unlikely(
-           (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-           (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-           ((ssize_t)iocb->aio_nbytes < 0)
-          )) {
-               pr_debug("EINVAL: overflow check\n");
-               return -EINVAL;
-       }
-
-       if (!get_reqs_available(ctx))
-               return -EAGAIN;
-
-       ret = -EAGAIN;
-       req = aio_get_req(ctx);
-       if (unlikely(!req))
-               goto out_put_reqs_available;
-
        req->ki_filp = fget(iocb->aio_fildes);
-       ret = -EBADF;
        if (unlikely(!req->ki_filp))
-               goto out_put_req;
+               return -EBADF;
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+               struct eventfd_ctx *eventfd;
                /*
                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
                 * instance of the file* now. The file descriptor must be
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
-               req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-               if (IS_ERR(req->ki_eventfd)) {
-                       ret = PTR_ERR(req->ki_eventfd);
-                       req->ki_eventfd = NULL;
-                       goto out_put_req;
-               }
+               eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+               if (IS_ERR(eventfd))
+                       return PTR_ERR(eventfd);
+
+               req->ki_eventfd = eventfd;
        }
 
-       ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-       if (unlikely(ret)) {
+       if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
                pr_debug("EFAULT: aio_key\n");
-               goto out_put_req;
+               return -EFAULT;
        }
 
-       req->ki_user_iocb = user_iocb;
-       req->ki_user_data = iocb->aio_data;
+       req->ki_res.obj = (u64)(unsigned long)user_iocb;
+       req->ki_res.data = iocb->aio_data;
+       req->ki_res.res = 0;
+       req->ki_res.res2 = 0;
 
        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
-               ret = aio_read(&req->rw, iocb, false, compat);
-               break;
+               return aio_read(&req->rw, iocb, false, compat);
        case IOCB_CMD_PWRITE:
-               ret = aio_write(&req->rw, iocb, false, compat);
-               break;
+               return aio_write(&req->rw, iocb, false, compat);
        case IOCB_CMD_PREADV:
-               ret = aio_read(&req->rw, iocb, true, compat);
-               break;
+               return aio_read(&req->rw, iocb, true, compat);
        case IOCB_CMD_PWRITEV:
-               ret = aio_write(&req->rw, iocb, true, compat);
-               break;
+               return aio_write(&req->rw, iocb, true, compat);
        case IOCB_CMD_FSYNC:
-               ret = aio_fsync(&req->fsync, iocb, false);
-               break;
+               return aio_fsync(&req->fsync, iocb, false);
        case IOCB_CMD_FDSYNC:
-               ret = aio_fsync(&req->fsync, iocb, true);
-               break;
+               return aio_fsync(&req->fsync, iocb, true);
        case IOCB_CMD_POLL:
-               ret = aio_poll(req, iocb);
-               break;
+               return aio_poll(req, iocb);
        default:
                pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-               ret = -EINVAL;
-               break;
+               return -EINVAL;
        }
-
-       /*
-        * If ret is 0, we'd either done aio_complete() ourselves or have
-        * arranged for that to be done asynchronously.  Anything non-zero
-        * means that we need to destroy req ourselves.
-        */
-       if (ret)
-               goto out_put_req;
-       return 0;
-out_put_req:
-       if (req->ki_eventfd)
-               eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
-out_put_reqs_available:
-       put_reqs_available(ctx, 1);
-       return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
+       struct aio_kiocb *req;
        struct iocb iocb;
+       int err;
 
        if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
                return -EFAULT;
 
-       return __io_submit_one(ctx, &iocb, user_iocb, compat);
+       /* enforce forwards compatibility on users */
+       if (unlikely(iocb.aio_reserved2)) {
+               pr_debug("EINVAL: reserve field set\n");
+               return -EINVAL;
+       }
+
+       /* prevent overflows */
+       if (unlikely(
+           (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+           (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+           ((ssize_t)iocb.aio_nbytes < 0)
+          )) {
+               pr_debug("EINVAL: overflow check\n");
+               return -EINVAL;
+       }
+
+       req = aio_get_req(ctx);
+       if (unlikely(!req))
+               return -EAGAIN;
+
+       err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
+       /*
+        * If err is 0, we'd either done aio_complete() ourselves or have
+        * arranged for that to be done asynchronously.  Anything non-zero
+        * means that we need to destroy req ourselves.
+        */
+       if (unlikely(err)) {
+               iocb_destroy(req);
+               put_reqs_available(ctx, 1);
+       }
+       return err;
 }
 
 /* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *     Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-       struct aio_kiocb *kiocb;
-
-       assert_spin_locked(&ctx->ctx_lock);
-
-       /* TODO: use a hash or array, this sucks. */
-       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-               if (kiocb->ki_user_iocb == iocb)
-                       return kiocb;
-       }
-       return NULL;
-}
-
 /* sys_io_cancel:
  *     Attempts to cancel an iocb previously passed to io_submit.  If
  *     the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct aio_kiocb *kiocb;
        int ret = -EINVAL;
        u32 key;
+       u64 obj = (u64)(unsigned long)iocb;
 
        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                return -EINVAL;
 
        spin_lock_irq(&ctx->ctx_lock);
-       kiocb = lookup_kiocb(ctx, iocb);
-       if (kiocb) {
-               ret = kiocb->ki_cancel(&kiocb->rw);
-               list_del_init(&kiocb->ki_list);
+       /* TODO: use a hash or array, this sucks. */
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_res.obj == obj) {
+                       ret = kiocb->ki_cancel(&kiocb->rw);
+                       list_del_init(&kiocb->ki_list);
+                       break;
+               }
        }
        spin_unlock_irq(&ctx->ctx_lock);
 
index 78d3257435c00b76633ee6168a2586ccfa70118c..24615c76c1d0e20739db509d3ddde0e111994e2b 100644 (file)
@@ -307,10 +307,10 @@ static void blkdev_bio_end_io(struct bio *bio)
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;
 
-       if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-               if (bio->bi_status && !dio->bio.bi_status)
-                       dio->bio.bi_status = bio->bi_status;
-       } else {
+       if (bio->bi_status && !dio->bio.bi_status)
+               dio->bio.bi_status = bio->bi_status;
+
+       if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;
index ec2d8919e7fb0ee63c28bcfd241d0e8a54ed05a3..cd4e693406a0e62bda2171d7d6800c8b4ef65ab0 100644 (file)
@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       /*
+        * If the fs is mounted with nologreplay, which requires it to be
+        * mounted in RO mode as well, we can not allow discard on free space
+        * inside block groups, because log trees refer to extents that are not
+        * pinned in a block group's free space cache (pinning the extents is
+        * precisely the first phase of replaying a log tree).
+        */
+       if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+               return -EROFS;
+
        rcu_read_lock();
        list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                dev_list) {
index dc6140013ae8194739a8aa6a387f12c35794bdf9..61d22a56c0ba4e7d43f0552854f4ac4e82443218 100644 (file)
@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
 
 static int prop_compression_validate(const char *value, size_t len)
 {
-       if (!strncmp("lzo", value, len))
+       if (!strncmp("lzo", value, 3))
                return 0;
-       else if (!strncmp("zlib", value, len))
+       else if (!strncmp("zlib", value, 4))
                return 0;
-       else if (!strncmp("zstd", value, len))
+       else if (!strncmp("zstd", value, 4))
                return 0;
 
        return -EINVAL;
@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
                btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
        } else if (!strncmp("zlib", value, 4)) {
                type = BTRFS_COMPRESS_ZLIB;
-       } else if (!strncmp("zstd", value, len)) {
+       } else if (!strncmp("zstd", value, 4)) {
                type = BTRFS_COMPRESS_ZSTD;
                btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
        } else {
index a279c58fe3606224df4c7c441090ed07f98d15b9..d18cad28c1c3a0af802c4c35775582996000f664 100644 (file)
@@ -88,22 +88,31 @@ static int find_dynamic_major(void)
 /*
  * Register a single major with a specified minor range.
  *
- * If major == 0 this functions will dynamically allocate a major and return
- * its number.
- *
- * If major > 0 this function will attempt to reserve the passed range of
- * minors and will return zero on success.
+ * If major == 0 this function will dynamically allocate an unused major.
+ * If major > 0 this function will attempt to reserve the range of minors
+ * with given major.
  *
- * Returns a -ve errno on failure.
  */
 static struct char_device_struct *
 __register_chrdev_region(unsigned int major, unsigned int baseminor,
                           int minorct, const char *name)
 {
-       struct char_device_struct *cd, **cp;
-       int ret = 0;
+       struct char_device_struct *cd, *curr, *prev = NULL;
+       int ret = -EBUSY;
        int i;
 
+       if (major >= CHRDEV_MAJOR_MAX) {
+               pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
+                      name, major, CHRDEV_MAJOR_MAX-1);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (minorct > MINORMASK + 1 - baseminor) {
+               pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
+                       name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
+               return ERR_PTR(-EINVAL);
+       }
+
        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);
@@ -120,10 +129,20 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
                major = ret;
        }
 
-       if (major >= CHRDEV_MAJOR_MAX) {
-               pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
-                      name, major, CHRDEV_MAJOR_MAX-1);
-               ret = -EINVAL;
+       i = major_to_index(major);
+       for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
+               if (curr->major < major)
+                       continue;
+
+               if (curr->major > major)
+                       break;
+
+               if (curr->baseminor + curr->minorct <= baseminor)
+                       continue;
+
+               if (curr->baseminor >= baseminor + minorct)
+                       break;
+
                goto out;
        }
 
@@ -132,37 +151,14 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
        cd->minorct = minorct;
        strlcpy(cd->name, name, sizeof(cd->name));
 
-       i = major_to_index(major);
-
-       for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
-               if ((*cp)->major > major ||
-                   ((*cp)->major == major &&
-                    (((*cp)->baseminor >= baseminor) ||
-                     ((*cp)->baseminor + (*cp)->minorct > baseminor))))
-                       break;
-
-       /* Check for overlapping minor ranges.  */
-       if (*cp && (*cp)->major == major) {
-               int old_min = (*cp)->baseminor;
-               int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
-               int new_min = baseminor;
-               int new_max = baseminor + minorct - 1;
-
-               /* New driver overlaps from the left.  */
-               if (new_max >= old_min && new_max <= old_max) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-
-               /* New driver overlaps from the right.  */
-               if (new_min <= old_max && new_min >= old_min) {
-                       ret = -EBUSY;
-                       goto out;
-               }
+       if (!prev) {
+               cd->next = curr;
+               chrdevs[i] = cd;
+       } else {
+               cd->next = prev->next;
+               prev->next = cd;
        }
 
-       cd->next = *cp;
-       *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
 out:
index f9b71c12cc9f6d46267eaf73a801dd00715a9cf2..a05bf1d6e1d04143da40126e0a07ea927347001e 100644 (file)
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                        tcon->ses->server->echo_interval / HZ);
        if (tcon->snapshot_time)
                seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+       if (tcon->handle_timeout)
+               seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
        /* convert actimeo and display it in seconds */
        seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
index 38feae812b4704b315ee3adfe2a1eaa2c4740e45..585ad3207cb120a34c3da24e418dd20a6daf04cf 100644 (file)
  */
 #define CIFS_MAX_ACTIMEO (1 << 30)
 
+/*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
 /*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurrently.
@@ -586,6 +592,7 @@ struct smb_vol {
        struct nls_table *local_nls;
        unsigned int echo_interval; /* echo interval in secs */
        __u64 snapshot_time; /* needed for timewarp tokens */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
        __u32 vol_serial_number;
        __le64 vol_create_time;
        __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        __u32 ss_flags;         /* sector size flags */
        __u32 perf_sector_size; /* best sector size for perf */
        __u32 max_chunks;
@@ -1325,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
 }
 
 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 
 #define CIFS_CACHE_READ_FLG    1
@@ -1847,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
 #endif /* CONFIG_CIFS_ACL */
 
 void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
index a8e9738db691294736105bf8a0cd03772a9a2447..4c0e44489f21497670131b5a889ae6f4eb0b843e 100644 (file)
@@ -103,7 +103,7 @@ enum {
        Opt_cruid, Opt_gid, Opt_file_mode,
        Opt_dirmode, Opt_port,
        Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
-       Opt_echo_interval, Opt_max_credits,
+       Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
        Opt_snapshot,
 
        /* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_rsize, "rsize=%s" },
        { Opt_wsize, "wsize=%s" },
        { Opt_actimeo, "actimeo=%s" },
+       { Opt_handletimeout, "handletimeout=%s" },
        { Opt_echo_interval, "echo_interval=%s" },
        { Opt_max_credits, "max_credits=%s" },
        { Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
+       /* Most clients set timeout to 0, allows server to use its default */
+       vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
        /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
        vol->ops = &smb30_operations;
        vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
                        break;
+               case Opt_handletimeout:
+                       if (get_option_ul(args, &option)) {
+                               cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+                                        __func__);
+                               goto cifs_parse_mount_err;
+                       }
+                       vol->handle_timeout = option;
+                       if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+                               cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+                               goto cifs_parse_mount_err;
+                       }
+                       break;
                case Opt_echo_interval:
                        if (get_option_ul(args, &option)) {
                                cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
                return 0;
        if (tcon->snapshot_time != volume_info->snapshot_time)
                return 0;
+       if (tcon->handle_timeout != volume_info->handle_timeout)
+               return 0;
        return 1;
 }
 
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                        tcon->snapshot_time = volume_info->snapshot_time;
        }
 
+       if (volume_info->handle_timeout) {
+               if (ses->server->vals->protocol_id == 0) {
+                       cifs_dbg(VFS,
+                            "Use SMB2.1 or later for handle timeout option\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               } else
+                       tcon->handle_timeout = volume_info->handle_timeout;
+       }
+
        tcon->ses = ses;
        if (volume_info->password) {
                tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
index 89006e044973ec2d97ad784236f66cb447431693..9c0ccc06d172e9371513105a9a19cb9cef68ddc0 100644 (file)
@@ -360,12 +360,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
        return cifs_file;
 }
 
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference of file priv data
+ *
+ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+{
+       _cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference of file priv data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_for_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. If calling this function from the
+ * oplock break handler, you need to pass false.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 {
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
        spin_unlock(&tcon->open_file_lock);
 
-       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = wait_oplock_handler ?
+               cancel_work_sync(&cifs_file->oplock_break) : false;
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -4603,6 +4622,7 @@ void cifs_oplock_break(struct work_struct *work)
                                                             cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
+       _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
        cifs_done_oplock_break(cinode);
 }
 
index bee203055b300b1d13a284e9571225f7ba4888c8..1e1626a2cfc393afb5ea6470261e6bd9703a2e40 100644 (file)
@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);
 
-                               queue_work(cifsoplockd_wq,
-                                          &netfile->oplock_break);
+                               cifs_queue_oplock_break(netfile);
                                netfile->oplock_break_cancelled = false;
 
                                spin_unlock(&tcon->open_file_lock);
@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
        spin_unlock(&cinode->writers_lock);
 }
 
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+       /*
+        * Bump the handle refcount now while we hold the
+        * open_file_lock to enforce the validity of it for the oplock
+        * break handler. The matching put is done at the end of the
+        * handler.
+        */
+       cifsFileInfo_get(cfile);
+
+       queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
+
 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 {
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
index b204e84b87fb52d938dc138379f7877ffb2ba74a..54bffb2a1786d00c5becdb9c2c275c0aa5f87c99 100644 (file)
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 
 
         if (oparms->tcon->use_resilient) {
-               nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+               /* default timeout is 0, servers pick default (120 seconds) */
+               nr_ioctl_req.Timeout =
+                       cpu_to_le32(oparms->tcon->handle_timeout);
                nr_ioctl_req.Reserved = 0;
                rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
                        fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
                        true /* is_fsctl */,
                        (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
-                       NULL, NULL /* no return info */);
+                       CIFSMaxBufSize, NULL, NULL /* no return info */);
                if (rc == -EOPNOTSUPP) {
                        cifs_dbg(VFS,
                             "resiliency not supported by server, disabling\n");
index 0e3570e40ff8e8d233389063290ccdecdfb44a25..e311f58dc1c82809de0283e434a9aff09356825f 100644 (file)
@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
                        clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                  &cinode->flags);
 
-               queue_work(cifsoplockd_wq, &cfile->oplock_break);
+               cifs_queue_oplock_break(cfile);
                kfree(lw);
                return true;
        }
@@ -712,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &cinode->flags);
                                spin_unlock(&cfile->file_info_lock);
-                               queue_work(cifsoplockd_wq,
-                                          &cfile->oplock_break);
+
+                               cifs_queue_oplock_break(cfile);
 
                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
index 1022a3771e140d819e767ba5a75677a88d65f911..c36ff0d1fe2a8b7b2668466464fc9da9e45a774f 100644 (file)
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
-                       (char **)&out_buf, &ret_data_len);
+                       CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
-               oplock = smb2_parse_lease_state(server, o_rsp,
-                                               &oparms.fid->epoch,
-                                               oparms.fid->lease_key);
-       else
-               goto oshr_exit;
-
-
        memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
        tcon->crfid.tcon = tcon;
        tcon->crfid.is_valid = true;
        kref_init(&tcon->crfid.refcount);
-       kref_get(&tcon->crfid.refcount);
 
+       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+               kref_get(&tcon->crfid.refcount);
+               oplock = smb2_parse_lease_state(server, o_rsp,
+                                               &oparms.fid->epoch,
+                                               oparms.fid->lease_key);
+       } else
+               goto oshr_exit;
 
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_exit;
-       rc = smb2_validate_and_copy_iov(
+       if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
-                               (char *)&tcon->crfid.file_all_info);
-       if (rc)
-               goto oshr_exit;
-       tcon->crfid.file_all_info_is_valid = 1;
+                               (char *)&tcon->crfid.file_all_info))
+               tcon->crfid.file_all_info_is_valid = 1;
 
  oshr_exit:
        mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
-                       NULL, 0 /* no input */,
+                       NULL, 0 /* no input */, CIFSMaxBufSize,
                        (char **)&res_key, &ret_data_len);
 
        if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                        rc = SMB2_ioctl_init(tcon, &rqst[1],
                                             COMPOUND_FID, COMPOUND_FID,
                                             qi.info_type, true, NULL,
-                                            0);
+                                            0, CIFSMaxBufSize);
                }
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
-                       sizeof(struct copychunk_ioctl), (char **)&retbuf,
-                       &ret_data_len);
+                       sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+                       (char **)&retbuf, &ret_data_len);
                if (rc == 0) {
                        if (ret_data_len !=
                                        sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
                        true /* is_fctl */,
-                       &setsparse, 1, NULL, NULL);
+                       &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
        if (rc) {
                tcon->broken_sparse_sup = true;
                cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
                        true /* is_fsctl */,
                        (char *)&dup_ext_buf,
                        sizeof(struct duplicate_extents_to_file),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
        if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
                        true /* is_fsctl */,
                        (char *)&integr_info,
                        sizeof(struct fsctl_set_integrity_information_req),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
 }
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
 #define GMT_TOKEN_SIZE 50
 
+#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+
 /*
  * Input buffer contains (empty) struct smb_snapshot array with size filled in
  * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
        char *retbuf = NULL;
        unsigned int ret_data_len = 0;
        int rc;
+       u32 max_response_size;
        struct smb_snapshot_array snapshot_in;
 
+       if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+               return -EFAULT;
+
+       /*
+        * Note that for snapshot queries that servers like Azure expect that
+        * the first query be minimal size (and just used to get the number/size
+        * of previous versions) so response size must be specified as EXACTLY
+        * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
+        * of eight bytes.
+        */
+       if (ret_data_len == 0)
+               max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+       else
+               max_response_size = CIFSMaxBufSize;
+
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SRV_ENUMERATE_SNAPSHOTS,
                        true /* is_fsctl */,
-                       NULL, 0 /* no input data */,
+                       NULL, 0 /* no input data */, max_response_size,
                        (char **)&retbuf,
                        &ret_data_len);
        cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                                FSCTL_DFS_GET_REFERRALS,
                                true /* is_fsctl */,
-                               (char *)dfs_req, dfs_req_size,
+                               (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
                                (char **)&dfs_rsp, &dfs_rsp_size);
        } while (rc == -EAGAIN);
 
@@ -2375,6 +2389,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
                       &resp_buftype);
+       if (!rc)
+               SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        if (!rc || !err_iov.iov_base) {
                rc = -ENOENT;
                goto free_path;
@@ -2658,7 +2674,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
                             cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                             true /* is_fctl */, (char *)&fsctl_buf,
-                            sizeof(struct file_zero_data_information));
+                            sizeof(struct file_zero_data_information),
+                            CIFSMaxBufSize);
        if (rc)
                goto zero_range_exit;
 
@@ -2735,7 +2752,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                        true /* is_fctl */, (char *)&fsctl_buf,
-                       sizeof(struct file_zero_data_information), NULL, NULL);
+                       sizeof(struct file_zero_data_information),
+                       CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
        return rc;
 }
index 21ac19ff19cb2c3257f524f4aef90f0de2e8d342..b8f7262ac35412f3d1f13743ba4555f8756018b5 100644 (file)
@@ -832,8 +832,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
                } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
                        /* ops set to 3.0 by default for default so update */
                        ses->server->ops = &smb21_operations;
-               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
+                       ses->server->vals = &smb21_values;
+               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
                        ses->server->ops = &smb311_operations;
+                       ses->server->vals = &smb311_values;
+               }
        } else if (le16_to_cpu(rsp->DialectRevision) !=
                                ses->server->vals->protocol_id) {
                /* if requested single dialect ensure returned dialect matched */
@@ -1002,7 +1005,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
-               (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
+               (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+               (char **)&pneg_rsp, &rsplen);
        if (rc == -EOPNOTSUPP) {
                /*
                 * Old Windows versions or Netapp SMB server can return
@@ -1858,8 +1862,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
 }
 
 static struct create_durable_v2 *
-create_durable_v2_buf(struct cifs_fid *pfid)
+create_durable_v2_buf(struct cifs_open_parms *oparms)
 {
+       struct cifs_fid *pfid = oparms->fid;
        struct create_durable_v2 *buf;
 
        buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1873,7 +1878,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
                                (struct create_durable_v2, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
 
-       buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+       /*
+        * NB: Handle timeout defaults to 0, which allows server to choose
+        * (most servers default to 120 seconds) and most clients default to 0.
+        * This can be overridden at mount ("handletimeout=") if the user wants
+        * a different persistent (or resilient) handle timeout for all
+        * opens on a particular SMB3 mount.
+        */
+       buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
        buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
        generate_random_uuid(buf->dcontext.CreateGuid);
        memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1926,7 +1938,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = create_durable_v2_buf(oparms->fid);
+       iov[num].iov_base = create_durable_v2_buf(oparms);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2478,7 +2490,8 @@ creat_exit:
 int
 SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                u64 persistent_fid, u64 volatile_fid, u32 opcode,
-               bool is_fsctl, char *in_data, u32 indatalen)
+               bool is_fsctl, char *in_data, u32 indatalen,
+               __u32 max_response_size)
 {
        struct smb2_ioctl_req *req;
        struct kvec *iov = rqst->rq_iov;
@@ -2520,16 +2533,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
        req->OutputCount = 0; /* MBZ */
 
        /*
-        * Could increase MaxOutputResponse, but that would require more
-        * than one credit. Windows typically sets this smaller, but for some
+        * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
+        * We could increase default MaxOutputResponse, but that could require
+        * more credits. Windows typically sets this smaller, but for some
         * ioctls it may be useful to allow server to send more. No point
         * limiting what the server can send as long as fits in one credit
-        * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
-        * (by default, note that it can be overridden to make max larger)
-        * in responses (except for read responses which can be bigger.
-        * We may want to bump this limit up
+        * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want
+        * to increase this limit in the future.
+        * Note that for snapshot queries that servers like Azure expect that
+        * the first query be minimal size (and just used to get the number/size
+        * of previous versions) so response size must be specified as EXACTLY
+        * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
+        * of eight bytes.  Currently that is the only case where we set max
+        * response size smaller.
         */
-       req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+       req->MaxOutputResponse = cpu_to_le32(max_response_size);
 
        if (is_fsctl)
                req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2550,13 +2568,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
                cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
+
 /*
  *     SMB2 IOCTL is used for both IOCTLs and FSCTLs
  */
 int
 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid, u32 opcode, bool is_fsctl,
-          char *in_data, u32 indatalen,
+          char *in_data, u32 indatalen, u32 max_out_data_len,
           char **out_data, u32 *plen /* returned data len */)
 {
        struct smb_rqst rqst;
@@ -2593,8 +2612,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rqst.rq_iov = iov;
        rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
 
-       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
-                            opcode, is_fsctl, in_data, indatalen);
+       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+                            is_fsctl, in_data, indatalen, max_out_data_len);
        if (rc)
                goto ioctl_exit;
 
@@ -2672,7 +2691,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SET_COMPRESSION, true /* is_fsctl */,
                        (char *)&fsctl_input /* data input */,
-                       2 /* in data len */, &ret_data /* out data */, NULL);
+                       2 /* in data len */, CIFSMaxBufSize /* max out data */,
+                       &ret_data /* out data */, NULL);
 
        cifs_dbg(FYI, "set compression rc %d\n", rc);
 
@@ -3431,8 +3451,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        rqst.rq_nvec = 1;
 
        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
-
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3454,6 +3472,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, io_parms->length);
 
+       cifs_small_buf_release(req);
+
        *nbytes = le32_to_cpu(rsp->DataLength);
        if ((*nbytes > CIFS_MAX_MSGSIZE) ||
            (*nbytes > io_parms->length)) {
@@ -3752,7 +3772,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3770,6 +3789,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
                                     io_parms->offset, *nbytes);
        }
 
+       cifs_small_buf_release(req);
        free_rsp_buf(resp_buftype, rsp);
        return rc;
 }
index 3c32d0cfea69b0c7191336e5b38247de578ed63b..52df125e918984139b176a5f4101ae6da11c7e0f 100644 (file)
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
 extern void SMB2_open_free(struct smb_rqst *rqst);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                    bool is_fsctl, char *in_data, u32 indatalen,
+                    bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
                     char **out_data, u32 *plen /* returned data len */);
 extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                          bool is_fsctl, char *in_data, u32 indatalen);
+                          bool is_fsctl, char *in_data, u32 indatalen,
+                          __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
index ca0671d55aa699df6723ffb897706b6579c68780..e5e54da1715f630cf1471e625e72045b6f31e112 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
+       pgtable_t pgtable = NULL;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
@@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
                        DAX_PMD | DAX_ZERO_PAGE, false);
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }
 
+       if (pgtable) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               mm_inc_nr_ptes(vma->vm_mm);
+       }
        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        return VM_FAULT_NOPAGE;
 
 fallback:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
        return VM_FAULT_FALLBACK;
 }
index 95b5e78c22b1e98811d3aca9c64c2c5deb54c6fe..f25daa207421c50cf38b1e4771ef5ab3332e9be1 100644 (file)
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
-       .evict_inode    = debugfs_evict_inode,
+       .destroy_inode  = debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
index 8a63e52785e978a6792542d96c66cb202e5f1d1b..9971a35cf1ef66c960862ef550197814052b9267 100644 (file)
@@ -2056,10 +2056,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
 
        ret = -EINVAL;
-       if (rem < len) {
-               pipe_unlock(pipe);
-               goto out;
-       }
+       if (rem < len)
+               goto out_free;
 
        rem = len;
        while (rem) {
@@ -2077,7 +2075,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
-                       pipe_buf_get(pipe, ibuf);
+                       if (!pipe_buf_get(pipe, ibuf))
+                               goto out_free;
+
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
@@ -2100,11 +2100,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        ret = fuse_dev_do_write(fud, &cs, len);
 
        pipe_lock(pipe);
+out_free:
        for (idx = 0; idx < nbuf; idx++)
                pipe_buf_release(pipe, &bufs[idx]);
        pipe_unlock(pipe);
 
-out:
        kvfree(bufs);
        return ret;
 }
index ec32fece5e1e9d80e726b7e8202cf214673d7a5a..9285dd4f4b1ce3ed2601daa62cb30de6d0eed92b 100644 (file)
@@ -755,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        umode_t mode, dev_t dev)
 {
        struct inode *inode;
-       struct resv_map *resv_map;
+       struct resv_map *resv_map = NULL;
 
-       resv_map = resv_map_alloc();
-       if (!resv_map)
-               return NULL;
+       /*
+        * Reserve maps are only needed for inodes that can have associated
+        * page allocations.
+        */
+       if (S_ISREG(mode) || S_ISLNK(mode)) {
+               resv_map = resv_map_alloc();
+               if (!resv_map)
+                       return NULL;
+       }
 
        inode = new_inode(sb);
        if (inode) {
@@ -794,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
-       } else
-               kref_put(&resv_map->refs, resv_map_release);
+       } else {
+               if (resv_map)
+                       kref_put(&resv_map->refs, resv_map_release);
+       }
 
        return inode;
 }
index bbdbd56cf2ac9384b83e78945b2c6c0031cc346d..f65f85d892174f252cdd2ae48c4d9eb3902ef479 100644 (file)
@@ -338,7 +338,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
        tail = ctx->cached_cq_tail;
        /* See comment at the top of the file */
        smp_rmb();
-       if (tail + 1 == READ_ONCE(ring->r.head))
+       if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
                return NULL;
 
        ctx->cached_cq_tail++;
@@ -682,11 +682,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
                list_add_tail(&req->list, &ctx->poll_list);
 }
 
-static void io_file_put(struct io_submit_state *state, struct file *file)
+static void io_file_put(struct io_submit_state *state)
 {
-       if (!state) {
-               fput(file);
-       } else if (state->file) {
+       if (state->file) {
                int diff = state->has_refs - state->used_refs;
 
                if (diff)
@@ -711,7 +709,7 @@ static struct file *io_file_get(struct io_submit_state *state, int fd)
                        state->ios_left--;
                        return state->file;
                }
-               io_file_put(state, NULL);
+               io_file_put(state);
        }
        state->file = fget_many(fd, state->ios_left);
        if (!state->file)
@@ -1671,7 +1669,7 @@ out:
 static void io_submit_state_end(struct io_submit_state *state)
 {
        blk_finish_plug(&state->plug);
-       io_file_put(state, NULL);
+       io_file_put(state);
        if (state->free_reqs)
                kmem_cache_free_bulk(req_cachep, state->free_reqs,
                                        &state->reqs[state->cur_req]);
@@ -1920,6 +1918,10 @@ static int io_sq_thread(void *data)
                unuse_mm(cur_mm);
                mmput(cur_mm);
        }
+
+       if (kthread_should_park())
+               kthread_parkme();
+
        return 0;
 }
 
@@ -2054,6 +2056,7 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
        if (ctx->sqo_thread) {
                ctx->sqo_stop = 1;
                mb();
+               kthread_park(ctx->sqo_thread);
                kthread_stop(ctx->sqo_thread);
                ctx->sqo_thread = NULL;
        }
@@ -2215,6 +2218,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                        fput(ctx->user_files[i]);
 
                kfree(ctx->user_files);
+               ctx->user_files = NULL;
                ctx->nr_user_files = 0;
                return ret;
        }
@@ -2235,19 +2239,27 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
        mmgrab(current->mm);
        ctx->sqo_mm = current->mm;
 
-       ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
-       if (!ctx->sq_thread_idle)
-               ctx->sq_thread_idle = HZ;
-
        ret = -EINVAL;
        if (!cpu_possible(p->sq_thread_cpu))
                goto err;
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
+               ret = -EPERM;
+               if (!capable(CAP_SYS_ADMIN))
+                       goto err;
+
+               ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
+               if (!ctx->sq_thread_idle)
+                       ctx->sq_thread_idle = HZ;
+
                if (p->flags & IORING_SETUP_SQ_AFF) {
                        int cpu;
 
                        cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
+                       ret = -EINVAL;
+                       if (!cpu_possible(p->sq_thread_cpu))
+                               goto err;
+
                        ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
                                                        ctx, cpu,
                                                        "io_uring-sq");
@@ -2917,11 +2929,23 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
 
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
+       __releases(ctx->uring_lock)
+       __acquires(ctx->uring_lock)
 {
        int ret;
 
        percpu_ref_kill(&ctx->refs);
+
+       /*
+        * Drop uring mutex before waiting for references to exit. If another
+        * thread is currently inside io_uring_enter() it might need to grab
+        * the uring_lock to make progress. If we hold it here across the drain
+        * wait, then we can deadlock. It's safe to drop the mutex here, since
+        * no new references will come in after we've killed the percpu ref.
+        */
+       mutex_unlock(&ctx->uring_lock);
        wait_for_completion(&ctx->ctx_done);
+       mutex_lock(&ctx->uring_lock);
 
        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
index 389ea53ea487538061ff3b6da78e27df2f894ac1..bccfc40b3a74ab002e45a07149afbe09634d8f64 100644 (file)
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
        jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-       if (f->target) {
-               kfree(f->target);
-               f->target = NULL;
-       }
-
        fds = f->dents;
        while(fds) {
                fd = fds;
index bb6ae387469f4d020424bfb13333e24d84e68123..05d892c79339f97276c81337c26929d8f53b7db2 100644 (file)
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
 static void jffs2_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-       kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+       struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+       kfree(f->target);
+       kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
index ff6f85fb676b7c1094878b269d35f2f127ca5fb5..5196bfa7894d21c0eb1220f4d9bb9e51e2593afb 100644 (file)
@@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
        };
        ssize_t err, err2;
 
-       if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
-               return -EOPNOTSUPP;
-
        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);
index 45b2322e092d2455b508a8fdc00f5bde0b73c4e9..00d17198ee12aa7f6177bd3c1c5830fa655d1033 100644 (file)
@@ -133,8 +133,10 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
                                    struct file *file_out, loff_t pos_out,
                                    size_t count, unsigned int flags)
 {
+       if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
+               return -EOPNOTSUPP;
        if (file_inode(file_in) == file_inode(file_out))
-               return -EINVAL;
+               return -EOPNOTSUPP;
        return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
 }
 
index cfcabc33e24d01136ba00c336f90497f657fb0a5..602446158bfb56e1fe62b74411d276431baef8c6 100644 (file)
@@ -2589,7 +2589,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
                        ARRAY_SIZE(nfs4_acl_bitmap), &hdr);
 
        rpc_prepare_reply_pages(req, args->acl_pages, 0,
-                               args->acl_len, replen);
+                               args->acl_len, replen + 1);
        encode_nops(&hdr);
 }
 
@@ -2811,7 +2811,7 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
        }
 
        rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
-                               PAGE_SIZE, replen);
+                               PAGE_SIZE, replen + 1);
        encode_nops(&hdr);
 }
 
index 23790c7b2289d21328db2a824eef5c6484e29089..c27ac96a95bd3535bc893493492fdc7681aba1fe 100644 (file)
@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
                args->nfs_server.port = ntohs(data->addr.sin_port);
-               if (!nfs_verify_server_address(sap))
+               if (sap->sa_family != AF_INET ||
+                   !nfs_verify_server_address(sap))
                        goto out_no_address;
 
                if (!(data->flags & NFS_MOUNT_TCP))
index f1c2f855fd43c7664a739c2172f83be2f93944e4..a00350018a4792e758d5749e6294902cf32f2174 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -1215,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
 }
 
 EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have notion of position
+ * (file.f_pos is always 0). Contrary to file descriptors of other regular
+ * files, .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it could be
+ * directly used as file_operations.open .
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+       filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+       filp->f_mode |= FMODE_STREAM;
+       return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
index 070aad543382a4e30aa0bd5eef94b5dabba7219d..41065901106b09d4365ebc13ee6cfa7b6465339b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -188,9 +188,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
  *     in the tee() system call, when we duplicate the buffers in one
  *     pipe into another.
  */
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-       get_page(buf->page);
+       return try_get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
 
index ddef482f133406737e09e5df4966aea9b6ec06aa..6a803a0b75df45af049fd655a7e5a729f436708c 100644 (file)
@@ -616,24 +616,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
 {
-       long nr;
-       unsigned long args[6], sp, pc;
+       struct syscall_info info;
+       u64 *args = &info.data.args[0];
        int res;
 
        res = lock_trace(task);
        if (res)
                return res;
 
-       if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
+       if (task_current_syscall(task, &info))
                seq_puts(m, "running\n");
-       else if (nr < 0)
-               seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+       else if (info.data.nr < 0)
+               seq_printf(m, "%d 0x%llx 0x%llx\n",
+                          info.data.nr, info.sp, info.data.instruction_pointer);
        else
                seq_printf(m,
-                      "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
-                      nr,
+                      "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
+                      info.data.nr,
                       args[0], args[1], args[2], args[3], args[4], args[5],
-                      sp, pc);
+                      info.sp, info.data.instruction_pointer);
        unlock_trace(task);
 
        return 0;
index 92a91e7816d8472c3451a99a456f6f5b7b84c5b5..95ca1fe7283cff265247c6f3a84e5fa573299fca 100644 (file)
@@ -1143,6 +1143,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                        count = -EINTR;
                                        goto out_mm;
                                }
+                               /*
+                                * Avoid modifying vma->vm_flags
+                                * without locked ops while the
+                                * coredump reads the vm_flags.
+                                */
+                               if (!mmget_still_valid(mm)) {
+                                       /*
+                                        * Silently return "count"
+                                        * like if get_task_mm()
+                                        * failed. FIXME: should this
+                                        * function have returned
+                                        * -ESRCH if get_task_mm()
+                                        * failed like if
+                                        * get_proc_task() fails?
+                                        */
+                                       up_write(&mm->mmap_sem);
+                                       goto out_mm;
+                               }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
index 177ccc3d405a33b425998e400b105418d8f537a4..61b43ad7608e301336662d7cab1cf6a7bda8067c 100644 (file)
@@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
 static inline loff_t file_pos_read(struct file *file)
 {
-       return file->f_pos;
+       return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
 }
 
 static inline void file_pos_write(struct file *file, loff_t pos)
 {
-       file->f_pos = pos;
+       if ((file->f_mode & FMODE_STREAM) == 0)
+               file->f_pos = pos;
 }
 
 ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
index 3ee7e82df48f2b14d09ddd3b4877879e90f8c418..98943d9b219c0cea1037770cbc6578970fbe69b6 100644 (file)
@@ -1593,7 +1593,11 @@ retry:
                         * Get a reference to this pipe buffer,
                         * so we can copy the contents over.
                         */
-                       pipe_buf_get(ipipe, ibuf);
+                       if (!pipe_buf_get(ipipe, ibuf)) {
+                               if (ret == 0)
+                                       ret = -EFAULT;
+                               break;
+                       }
                        *obuf = *ibuf;
 
                        /*
@@ -1667,7 +1671,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
                 * Get a reference to this pipe buffer,
                 * so we can copy the contents over.
                 */
-               pipe_buf_get(ipipe, ibuf);
+               if (!pipe_buf_get(ipipe, ibuf)) {
+                       if (ret == 0)
+                               ret = -EFAULT;
+                       break;
+               }
 
                obuf = opipe->bufs + nbuf;
                *obuf = *ibuf;
index 8dc2818fdd84990b74e07a8d479de994b8e5be9b..12628184772c04b27c975568101a4b5cddf442f8 100644 (file)
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ubifs_inode *ui = ubifs_inode(inode);
+       kfree(ui->data);
        kmem_cache_free(ubifs_inode_slab, ui);
 }
 
 static void ubifs_destroy_inode(struct inode *inode)
 {
-       struct ubifs_inode *ui = ubifs_inode(inode);
-
-       kfree(ui->data);
        call_rcu(&inode->i_rcu, ubifs_i_callback);
 }
 
index 89800fc7dc9d562cd3557988adc766fa41c51209..f5de1e726356a51c27ff529f98d99032650eb839 100644 (file)
@@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 
                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
+               /* no task can run (and in turn coredump) yet */
+               VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
@@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto skip_mm;
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
+skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
@@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
@@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
index 0c938a4354f6f58f67e0bb3555dca24460afadb7..b88239e9efe49979a5c94a3c411f8f36ba09e541 100644 (file)
@@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call.  First argument is stored in
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args);
+                          unsigned long *args);
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args);
 
 /**
index cfb7be40bed7a55a453757b30bf20772590b718d..ce4de6b1e444a855d04cc9afe5c7ebca5833a1a6 100644 (file)
@@ -418,6 +418,8 @@ struct drm_crtc_helper_funcs {
         * Drivers can use the @old_crtc_state input parameter if the operations
         * needed to enable the CRTC don't depend solely on the new state but
         * also on the transition between the old state and the new state.
+        *
+        * This function is optional.
         */
        void (*atomic_enable)(struct drm_crtc *crtc,
                              struct drm_crtc_state *old_crtc_state);
@@ -441,6 +443,8 @@ struct drm_crtc_helper_funcs {
         * parameter @old_crtc_state which could be used to access the old
         * state. Atomic drivers should consider to use this one instead
         * of @disable.
+        *
+        * This function is optional.
         */
        void (*atomic_disable)(struct drm_crtc *crtc,
                               struct drm_crtc_state *old_crtc_state);
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644 (file)
index 0000000..6a0b70a
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL              0
+#define PRCI_CLK_DDRPLL                       1
+#define PRCI_CLK_GEMGXLPLL            2
+#define PRCI_CLK_TLCLK                3
+
+#endif
index 8063e8314eefbfbf75181622465a33f2fbe48ba8..6d487c5eba2cae612e58ef72b8712d8d97aa71b5 100644 (file)
 #define RESET_SD_EMMC_A                        44
 #define RESET_SD_EMMC_B                        45
 #define RESET_SD_EMMC_C                        46
-/*                                     47-60 */
+/*                                     47      */
+#define RESET_USB_PHY20                        48
+#define RESET_USB_PHY21                        49
+/*                                     50-60   */
 #define RESET_AUDIO_CODEC              61
 /*                                     62-63   */
 /*     RESET2                                  */
index adbcb681782604b5356a0b1db56a9a9af6177529..0071298b9b28eb41a313e7f29eb609bd84eb4785 100644 (file)
@@ -38,7 +38,7 @@ enum {
 
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...);
+                       unsigned char *h2, unsigned int h3, ...);
 int TSS_checkhmac1(unsigned char *buffer,
                          const uint32_t command,
                          const unsigned char *ononce,
index bb6090aa165d362ae399194fdca8d58b7cb8f5bf..e584673c18814295e2c901ab16d69511bc5e7a37 100644 (file)
@@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio)
        return bio->bi_vcnt >= bio->bi_max_vecs;
 }
 
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all)                 \
-       for (bv = bvec_init_iter_all(&iter_all);                        \
-               (iter_all.done < (bvl)->bv_len) &&                      \
-               (mp_bvec_next_segment((bvl), &iter_all), 1);            \
-               iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+                                   struct bvec_iter_all *iter)
+{
+       if (iter->idx >= bio->bi_vcnt)
+               return false;
+
+       bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+       return true;
+}
 
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i, iter_all)                \
-       for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++)    \
-               mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, i, iter)                    \
+       for (i = 0, bvl = bvec_init_iter_all(&iter);                    \
+            bio_next_segment((bio), &iter); i++)
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
index 50fb0dee23e8662120461cd227cf11f548939339..d35b8ec1c485cba58a658b34edd2f9621cd20639 100644 (file)
@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
 
 #define __constant_bitrev32(x) \
 ({                                     \
-       u32 __x = x;                    \
-       __x = (__x >> 16) | (__x << 16);        \
-       __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);      \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;                   \
+       ___x = (___x >> 16) | (___x << 16);     \
+       ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev16(x) \
 ({                                     \
-       u16 __x = x;                    \
-       __x = (__x >> 8) | (__x << 8);  \
-       __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);        \
-       __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);        \
-       __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);        \
-       __x;                                                            \
+       u16 ___x = x;                   \
+       ___x = (___x >> 8) | (___x << 8);       \
+       ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);     \
+       ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);     \
+       ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);     \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8x4(x) \
 ({                     \
-       u32 __x = x;    \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8(x)  \
 ({                                     \
-       u8 __x = x;                     \
-       __x = (__x >> 4) | (__x << 4);  \
-       __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);      \
-       __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);      \
-       __x;                                                            \
+       u8 ___x = x;                    \
+       ___x = (___x >> 4) | (___x << 4);       \
+       ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);   \
+       ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);   \
+       ___x;                                                           \
 })
 
 #define bitrev32(x) \
index cb2aa7ecafff5cb772772db11d5ee06314f60359..db29928de46741b0887e6bd13ed577a59684d180 100644 (file)
@@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                           struct bio *bio);
 bool blk_mq_queue_stopped(struct request_queue *q);
index 5c58a3b2bf0038083b9dc2b88293349fa8afb22b..317ab30d29046baaa29d13d0213ef9c63c4bf95c 100644 (file)
@@ -548,7 +548,6 @@ struct request_queue {
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
        struct percpu_ref       q_usage_counter;
-       struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
index f6275c4da13a765fd60f3d34a9ca491d1d56db93..ff13cbc1887db7a3e2d4fe9f64916c34a8a44a49 100644 (file)
@@ -145,26 +145,33 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 
 static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
 {
-       iter_all->bv.bv_page = NULL;
        iter_all->done = 0;
+       iter_all->idx = 0;
 
        return &iter_all->bv;
 }
 
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
-                                       struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+                               struct bvec_iter_all *iter_all)
 {
        struct bio_vec *bv = &iter_all->bv;
 
-       if (bv->bv_page) {
+       if (iter_all->done) {
                bv->bv_page = nth_page(bv->bv_page, 1);
                bv->bv_offset = 0;
        } else {
-               bv->bv_page = bvec->bv_page;
-               bv->bv_offset = bvec->bv_offset;
+               bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset /
+                                           PAGE_SIZE);
+               bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
        }
        bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
                           bvec->bv_len - iter_all->done);
+       iter_all->done += bv->bv_len;
+
+       if (iter_all->done == bvec->bv_len) {
+               iter_all->idx++;
+               iter_all->done = 0;
+       }
 }
 
 /*
index 54357a258b358aa3961151c025cfef621ce5cbef..6ebc2098cfe1719a16ba177c567ed5a916955bb4 100644 (file)
@@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
                           struct screen_info *si, efi_guid_t *proto,
                           unsigned long size);
 
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 extern unsigned long efi_call_virt_save_flags(void);
 
index 2e9e2763bf47dbea239976034e32fd11f14826f7..6e8bc53740f050f63883ea6b7d077e5911bdca9f 100644 (file)
@@ -31,6 +31,7 @@ struct elevator_mq_ops {
        void (*exit_sched)(struct elevator_queue *);
        int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+       void (*depth_updated)(struct blk_mq_hw_ctx *);
 
        bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
        bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
index 8b42df09b04c9c222e3fb863daf60bef0b116a7f..dd28e7679089128a75d5ed86f5f6f435422d77eb 100644 (file)
@@ -158,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_OPENED           ((__force fmode_t)0x80000)
 #define FMODE_CREATED          ((__force fmode_t)0x100000)
 
+/* File is stream-like */
+#define FMODE_STREAM           ((__force fmode_t)0x200000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
@@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
 extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
index 34a5036debd341935a100b6fe4a7083db6262d5e..2d14e21c16c0b412535d00b3a537bf3353366d4a 100644 (file)
@@ -47,8 +47,8 @@
 
 #define u64_to_user_ptr(x) (           \
 {                                      \
-       typecheck(u64, x);              \
-       (void __user *)(uintptr_t)x;    \
+       typecheck(u64, (x));            \
+       (void __user *)(uintptr_t)(x);  \
 }                                      \
 )
 
index 201f0f2683f25bd382267042f9f7dbec8460f093..9a897256e481f311a1de41e448c52710d2c2f247 100644 (file)
@@ -173,6 +173,7 @@ struct kretprobe_instance {
        struct kretprobe *rp;
        kprobe_opcode_t *ret_addr;
        struct task_struct *task;
+       void *fp;
        char data[0];
 };
 
index 9d55c63db09b5dcb9ac997d802cb00ff356d4353..640a03642766bb4ae02c86e3606318c80adaf81d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-        * the caller has read kvm->online_vcpus before (as is the case
-        * for kvm_for_each_vcpu, for example).
-        */
+       int num_vcpus = atomic_read(&kvm->online_vcpus);
+       i = array_index_nospec(i, num_vcpus);
+
+       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
 }
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+       as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
index 03b6ba2a63f8c72109d41e180f5550142d29b96e..52aa4821093aa9db56273c51fb32bbce79f16754 100644 (file)
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
+ */
 #ifndef _LINUX_MEI_CL_BUS_H
 #define _LINUX_MEI_CL_BUS_H
 
index 1f3d880b7ca1736057546bc0b98997b6b8a65aea..dbb6118370c1e3c6df88b7da5695aea57dcd08ad 100644 (file)
@@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
 void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                             int idx)
 {
index 6fee8b1a4400842a7db69b0a08d6bc3edf436854..5cd824c1c0caa8c9adda4a8e6d640f43605cd4fb 100644 (file)
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
        if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
index 022541dc5dbfd7b12a54601c1d1a59e30eed8a37..0d07296488448b28b608cc7795de638aef185ba6 100644 (file)
@@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags {
 };
 
 struct mlx5_td {
+       /* protects tirs list changes while tirs refresh */
+       struct mutex     list_lock;
        struct list_head tirs_list;
        u32              tdn;
 };
index 76769749b5a5d546daf7f7513318ad65a494da4c..6b10c21630f54bdd14ddd6efa1510777165ec558 100644 (file)
@@ -966,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+       ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
 static inline void get_page(struct page *page)
 {
        page = compound_head(page);
@@ -973,8 +977,17 @@ static inline void get_page(struct page *page)
         * Getting a normal page or the head of a compound page
         * requires to already have an elevated page->_refcount.
         */
-       VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+       VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+       page_ref_inc(page);
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+       page = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+               return false;
        page_ref_inc(page);
+       return true;
 }
 
 static inline void put_page(struct page *page)
index 7eade9132f02e4f85f423404341c6241d335f4d4..4ef4bbe78a1da163fee585597b57ff215c819be6 100644 (file)
@@ -671,7 +671,7 @@ enum vm_fault_reason {
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf)
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
 
 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |       \
                        VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
index 26f69cf763f43dd1e0d61c0692f649aab21baeea..324e872c91d15407b6804c297de56cd480b215f8 100644 (file)
@@ -1500,6 +1500,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
        IFF_FAILOVER                    = 1<<27,
        IFF_FAILOVER_SLAVE              = 1<<28,
        IFF_L3MDEV_RX_HANDLER           = 1<<29,
+       IFF_LIVE_RENAME_OK              = 1<<30,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER                   IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE             IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK             IFF_LIVE_RENAME_OK
 
 /**
  *     struct net_device - The DEVICE structure.
index baa49e6a23cc7a16092962e4d4a6bfbba9132376..c40720cb59acc4190d40aafdfe23b42ef8159cf9 100644 (file)
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
        __le16                  numdl;
        __le16                  numdu;
        __u16                   rsvd11;
-       __le32                  lpol;
-       __le32                  lpou;
+       union {
+               struct {
+                       __le32 lpol;
+                       __le32 lpou;
+               };
+               __le64 lpo;
+       };
        __u32                   rsvd14[2];
 };
 
index 787d224ff43e1fc72ceed0164bf26a4f2fa08794..abb2dac3da9b9cf69b477c2d3726e019a0352b7a 100644 (file)
@@ -101,18 +101,20 @@ struct pipe_buf_operations {
        /*
         * Get a reference to the pipe buffer.
         */
-       void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+       bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
 };
 
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe:      the pipe that the buffer belongs to
  * @buf:       the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
  */
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       buf->ops->get(pipe, buf);
+       return buf->ops->get(pipe, buf);
 }
 
 /**
@@ -171,7 +173,7 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
index 3ab892208343c2d22d71a630f1097d6941534518..7a37ac27d0fb21d9f8afde973c7618e42574534b 100644 (file)
@@ -35,10 +35,13 @@ struct pmc_clk {
  *
  * @base:      PMC clock register base offset
  * @clks:      pointer to set of registered clocks, typically 0..5
+ * @critical:  flag to indicate if firmware enabled pmc_plt_clks
+ *             should be marked as critical or not
  */
 struct pmc_clk_data {
        void __iomem *base;
        const struct pmc_clk *clks;
+       bool critical;
 };
 
 #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
index edb9b040c94c31fe1ff91242232c10d0aea514ac..d5084ebd9f03045e7706872d0db88c3b06d88847 100644 (file)
@@ -9,6 +9,13 @@
 #include <linux/bug.h>                 /* For BUG_ON.  */
 #include <linux/pid_namespace.h>       /* For task_active_pid_ns.  */
 #include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */
+struct syscall_info {
+       __u64                   sp;
+       struct seccomp_data     data;
+};
 
 extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                            void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
 #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
 #endif
 
-extern int task_current_syscall(struct task_struct *target, long *callno,
-                               unsigned long args[6], unsigned int maxargs,
-                               unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
 
 extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
 #endif
index 0cd9f10423fb8e60645685ab5bdbad675d803a51..a3fda9f024c3c1988b6ff60954d7f7e74a9c1ecf 100644 (file)
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
                __mmdrop(mm);
 }
 
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally if the mmap_sem is held for reading
+ * there's no need of this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check can be removed, after
+ * the coredump code will hold the mmap_sem for writing before
+ * invoking the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+       return likely(!mm->core_state);
+}
+
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
index f3fb1edb3526ddc0c582f0ad32017ab7eaf21dd3..20d815a331454f93e7a66d808a5a5f84601e9a58 100644 (file)
@@ -21,6 +21,7 @@ struct shmem_inode_info {
        struct list_head        swaplist;       /* chain of maybes on swap */
        struct shared_policy    policy;         /* NUMA memory alloc policy */
        struct simple_xattrs    xattrs;         /* list of xattrs */
+       atomic_t                stop_eviction;  /* hold when working on inode */
        struct inode            vfs_inode;
 };
 
index 7927b875f80cf6ff74425e6d6a990aa27a032e21..6ab0a6fa512e75882f62a517e22f53ad946b2356 100644 (file)
@@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t);
 #ifndef __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *,const void *,__kernel_size_t);
 #endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
 #ifndef __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
index ec861cd0cfe8ce9fa5425909d243c660231f06c9..52d41d0c1ae1d54b6829a10134318ee2835b9fd5 100644 (file)
@@ -304,12 +304,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
 }
 #endif /* CONFIG_SUNRPC_SWAP */
 
-static inline bool
-rpc_task_need_resched(const struct rpc_task *task)
-{
-       if (RPC_IS_QUEUED(task) || task->tk_callback)
-               return true;
-       return false;
-}
-
 #endif /* _LINUX_SUNRPC_SCHED_H_ */
index fab02133a9197a43cd9df33728e657e1679c5e16..3dc70adfe5f5edbbfa6f6508849d1d3630a46e2b 100644 (file)
@@ -63,7 +63,7 @@ struct virtqueue;
 /*
  * Creates a virtqueue and allocates the descriptor ring.  If
  * may_reduce_num is set, then this may allocate a smaller ring than
- * expected.  The caller should query virtqueue_get_ring_size to learn
+ * expected.  The caller should query virtqueue_get_vring_size to learn
  * the actual size of the ring.
  */
 struct virtqueue *vring_create_virtqueue(unsigned int index,
index eaa1e762bf060b5b136ff7c219d75a670593b525..0c06178e4985b54c3dd12ba58ed91b28fd28343d 100644 (file)
@@ -17,6 +17,7 @@
 #define _VMW_VMCI_DEF_H_
 
 #include <linux/atomic.h>
+#include <linux/bits.h>
 
 /* Register offsets. */
 #define VMCI_STATUS_ADDR      0x00
 #define VMCI_MAX_DEVICES 1
 
 /* Status register bits. */
-#define VMCI_STATUS_INT_ON     0x1
+#define VMCI_STATUS_INT_ON     BIT(0)
 
 /* Control register bits. */
-#define VMCI_CONTROL_RESET        0x1
-#define VMCI_CONTROL_INT_ENABLE   0x2
-#define VMCI_CONTROL_INT_DISABLE  0x4
+#define VMCI_CONTROL_RESET        BIT(0)
+#define VMCI_CONTROL_INT_ENABLE   BIT(1)
+#define VMCI_CONTROL_INT_DISABLE  BIT(2)
 
 /* Capabilities register bits. */
-#define VMCI_CAPS_HYPERCALL     0x1
-#define VMCI_CAPS_GUESTCALL     0x2
-#define VMCI_CAPS_DATAGRAM      0x4
-#define VMCI_CAPS_NOTIFICATIONS 0x8
-#define VMCI_CAPS_PPN64         0x10
+#define VMCI_CAPS_HYPERCALL     BIT(0)
+#define VMCI_CAPS_GUESTCALL     BIT(1)
+#define VMCI_CAPS_DATAGRAM      BIT(2)
+#define VMCI_CAPS_NOTIFICATIONS BIT(3)
+#define VMCI_CAPS_PPN64         BIT(4)
 
 /* Interrupt Cause register bits. */
-#define VMCI_ICR_DATAGRAM      0x1
-#define VMCI_ICR_NOTIFICATION  0x2
+#define VMCI_ICR_DATAGRAM      BIT(0)
+#define VMCI_ICR_NOTIFICATION  BIT(1)
 
 /* Interrupt Mask register bits. */
-#define VMCI_IMR_DATAGRAM      0x1
-#define VMCI_IMR_NOTIFICATION  0x2
+#define VMCI_IMR_DATAGRAM      BIT(0)
+#define VMCI_IMR_NOTIFICATION  BIT(1)
 
 /* Maximum MSI/MSI-X interrupt vectors in the device. */
 #define VMCI_MAX_INTRS 2
@@ -463,9 +464,9 @@ struct vmci_datagram {
  * datagram callback is invoked in a delayed context (not interrupt context).
  */
 #define VMCI_FLAG_DG_NONE          0
-#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
-#define VMCI_FLAG_ANYCID_DG_HND    0x2
-#define VMCI_FLAG_DG_DELAYED_CB    0x4
+#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
+#define VMCI_FLAG_ANYCID_DG_HND    BIT(1)
+#define VMCI_FLAG_DG_DELAYED_CB    BIT(2)
 
 /*
  * Maximum supported size of a VMCI datagram for routable datagrams.
@@ -694,7 +695,7 @@ struct vmci_qp_detach_msg {
 };
 
 /* VMCI Doorbell API. */
-#define VMCI_FLAG_DELAYED_CB 0x01
+#define VMCI_FLAG_DELAYED_CB BIT(0)
 
 typedef void (*vmci_callback) (void *client_data);
 
index 2bfb87eb98ce15cd693819d42205a036ae6dd42f..78c856cba4f538c078fada09ef3238c2bc220069 100644 (file)
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+                            u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 
 #endif /* _NET_RXRPC_H */
index bb307a11ee636b7194bbe7d31c83f3d798b3379a..13bfeb712d36943cf9c04111a777d39cf08034a9 100644 (file)
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)          \
+       dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)         \
+       dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)                    \
        wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
 
index be3cad9c2e4c37b282e5c2d0e2ef5f05a79b7438..583526aad1d0ac6ac3681cf9cf56d4496a221c3f 100644 (file)
@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
                             unsigned char __user *data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
 
 /*
  *     Functions provided by ip_sockglue.c
index ac2ed8ec662bd97ebe0337085e78e5a61906d499..112dc18c658f15f79525cae64afa0ea38e2a1159 100644 (file)
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq, bool force);
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+       __ieee80211_schedule_txq(hw, txq, true);
+}
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+                    bool force)
+{
+       __ieee80211_schedule_txq(hw, txq, force);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
index a68ced28d8f47e09c6f6beabc56a22a04ae3f163..12689ddfc24c44fe3297d1eda548811d8061670b 100644 (file)
@@ -59,6 +59,7 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
+       u32                     hash_mix;
        atomic64_t              cookie_gen;
 
        struct list_head        list;           /* list of network namespaces */
index 16a842456189f2fc1a3363685b5dd4310a32b2b8..d9b665151f3d9e916f35620141542a5a145e6123 100644 (file)
@@ -2,16 +2,10 @@
 #ifndef __NET_NS_HASH_H__
 #define __NET_NS_HASH_H__
 
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
 
 static inline u32 net_hash_mix(const struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
-       return 0;
-#endif
+       return net->hash_mix;
 }
 #endif
index 5a0714ff500fd09bd288360a83dad57952e5efaf..80f15b1c1a489a71479845ae0d077875b1a52f66 100644 (file)
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
index 87499b6b35d6dd75ea3058449c5db484e2aca611..df5c69db68afc334d0ac51c031ca6120d8b7e6e9 100644 (file)
@@ -166,7 +166,7 @@ struct nci_conn_info {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NCI_HCI_MAX_PIPES          127
+#define NCI_HCI_MAX_PIPES          128
 
 struct nci_hci_gate {
        u8 gate;
index 7d1a0483a17ba01b94643fd78acd72095a2e0adb..a2b38b3deeca2096d01d536d9a492047827ff994 100644 (file)
@@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
        sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+       __u32 qlen = qdisc_qlen_sum(sch);
+
+       return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+                                            __u32 *backlog)
+{
+       struct gnet_stats_queue qstats = { 0 };
+       __u32 len = qdisc_qlen_sum(sch);
+
+       __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+       *qlen = qstats.qlen;
+       *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_reset(sch);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
        qh->head = NULL;
@@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
-       if (old != NULL) {
-               unsigned int qlen = old->q.qlen;
-               unsigned int backlog = old->qstats.backlog;
-
-               qdisc_reset(old);
-               qdisc_tree_reduce_backlog(old, qlen, backlog);
-       }
+       if (old != NULL)
+               qdisc_tree_flush_backlog(old);
        sch_tree_unlock(sch);
 
        return old;
index 8de5ee258b93a50b2fdcde796bae3a5b53ce4d6a..341f8bafa0cf585fc72e5819054f1b2f15a8e338 100644 (file)
@@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
  * @p:              poll_table
  *
  * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
  */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
                                  poll_table *p)
index a5a938583295c0789df287c737b7a1c87556c9f1..5934246b2c6f4bafbe318fdddfacb328a2b9bf5c 100644 (file)
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
                int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-       return sk_fullsock(sk) &
+       return sk_fullsock(sk) &&
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
               &tls_validate_xmit_skb);
 #else
index eb7db605955b852a469d11b62a93817f7726cf88..482b4ea87c3c4bd9fac786fa7165b30123174ea2 100644 (file)
@@ -802,8 +802,13 @@ struct snd_soc_component_driver {
        int probe_order;
        int remove_order;
 
-       /* signal if the module handling the component cannot be removed */
-       unsigned int ignore_module_refcount:1;
+       /*
+        * signal if the module handling the component should not be removed
+        * if a pcm is open. Setting this would prevent the module
+        * refcount being incremented in probe() but allow it be incremented
+        * when a pcm is opened and decremented when it is closed.
+        */
+       unsigned int module_get_upon_open:1;
 
        /* bits */
        unsigned int idle_bias_on:1;
@@ -1083,6 +1088,8 @@ struct snd_soc_card {
        struct mutex mutex;
        struct mutex dapm_mutex;
 
+       spinlock_t dpcm_lock;
+
        bool instantiated;
        bool topology_shortname_created;
 
index 44a3259ed4a5bde50e231a982624b0893e5bb0eb..b6e0cbc2c71f16df87380860e88544acb6d80a40 100644 (file)
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
 
        TP_fast_assign(
                __entry->id     = id;
-               syscall_get_arguments(current, regs, 0, 6, __entry->args);
+               syscall_get_arguments(current, regs, __entry->args);
        ),
 
        TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
diff --git a/include/uapi/linux/aspeed-p2a-ctrl.h b/include/uapi/linux/aspeed-p2a-ctrl.h
new file mode 100644 (file)
index 0000000..0333555
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_P2A_CTRL_H
+#define _UAPI_LINUX_ASPEED_P2A_CTRL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASPEED_P2A_CTRL_READ_ONLY 0
+#define ASPEED_P2A_CTRL_READWRITE 1
+
+/*
+ * This driver provides a mechanism for enabling or disabling the read-write
+ * property of specific windows into the ASPEED BMC's memory.
+ *
+ * A user can map a region of the BMC's memory as read-only or read-write, with
+ * the caveat that once any region is mapped, all regions are unlocked for
+ * reading.
+ */
+
+/*
+ * Unlock a region of BMC physical memory for access from the host.
+ *
+ * Also used to read back the optional memory-region configuration for the
+ * driver.
+ */
+struct aspeed_p2a_ctrl_mapping {
+       __u64 addr;
+       __u32 length;
+       __u32 flags;
+};
+
+#define __ASPEED_P2A_CTRL_IOCTL_MAGIC 0xb3
+
+/*
+ * This IOCTL is meant to configure a region or regions of memory given a
+ * starting address and length to be readable by the host, or
+ * readable-writeable.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_SET_WINDOW _IOW(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+               0x00, struct aspeed_p2a_ctrl_mapping)
+
+/*
+ * This IOCTL is meant to read back to the user the base address and length of
+ * the memory-region specified to the driver for use with mmap.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG \
+       _IOWR(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+               0x01, struct aspeed_p2a_ctrl_mapping)
+
+#endif /* _UAPI_LINUX_ASPEED_P2A_CTRL_H */
index 3652b239dad1d7c556ff28a7a9a87129aa9b89d3..d473e5ed044c71df4e734f91c96f828952834dc3 100644 (file)
@@ -1591,7 +1591,7 @@ enum ethtool_link_mode_bit_indices {
 
 static inline int ethtool_validate_speed(__u32 speed)
 {
-       return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+       return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
 }
 
 /* Duplex, half or full. */
index 7f14d4a66c28c1c13d1388c6dacfcff30711edab..64cee116928ebd92d97acec08c273e02c89958ab 100644 (file)
 #define KEY_TITLE              0x171
 #define KEY_SUBTITLE           0x172
 #define KEY_ANGLE              0x173
-#define KEY_ZOOM               0x174
+#define KEY_FULL_SCREEN                0x174   /* AC View Toggle */
+#define KEY_ZOOM               KEY_FULL_SCREEN
 #define KEY_MODE               0x175
 #define KEY_KEYBOARD           0x176
-#define KEY_SCREEN             0x177
+#define KEY_ASPECT_RATIO       0x177   /* HUTRR37: Aspect */
+#define KEY_SCREEN             KEY_ASPECT_RATIO
 #define KEY_PC                 0x178   /* Media Select Computer */
 #define KEY_TV                 0x179   /* Media Select TV */
 #define KEY_TV2                        0x17a   /* Media Select Cable */
index 0f681cbd38d3b402fd2731d7fb39b96d5a55736b..c6aec86cc5de90619ca1074ae8a7042850d6b333 100644 (file)
@@ -1,70 +1,9 @@
 /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
-/******************************************************************************
+/*
+ * Copyright(c) 2003-2015 Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
  * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *     Intel Corporation.
- *     linux-mei@linux.intel.com
- *     http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
+ */
 #ifndef _LINUX_MEI_H
 #define _LINUX_MEI_H
 
index 7fd6f633534c2a7e5cddb4681448e37b8a7e6058..613d431da783d6bd7bca909cd0d8d8fe5570dc8a 100644 (file)
@@ -20,8 +20,8 @@
 /*
  * Queue Numbering
  *
- * The external queues (DMA channels + CPU) MUST be before the internal queues
- * and each group (DMA channels + CPU and internal) must be contiguous inside
+ * The external queues (PCI DMA channels) MUST be before the internal queues
+ * and each group (PCI DMA channels and internal) must be contiguous inside
  * itself but there can be a gap between the two groups (although not
  * recommended)
  */
@@ -33,7 +33,7 @@ enum goya_queue_id {
        GOYA_QUEUE_ID_DMA_3,
        GOYA_QUEUE_ID_DMA_4,
        GOYA_QUEUE_ID_CPU_PQ,
-       GOYA_QUEUE_ID_MME,
+       GOYA_QUEUE_ID_MME,      /* Internal queues start here */
        GOYA_QUEUE_ID_TPC0,
        GOYA_QUEUE_ID_TPC1,
        GOYA_QUEUE_ID_TPC2,
@@ -45,11 +45,18 @@ enum goya_queue_id {
        GOYA_QUEUE_ID_SIZE
 };
 
+enum hl_device_status {
+       HL_DEVICE_STATUS_OPERATIONAL,
+       HL_DEVICE_STATUS_IN_RESET,
+       HL_DEVICE_STATUS_MALFUNCTION
+};
+
 /* Opcode for management ioctl */
 #define HL_INFO_HW_IP_INFO     0
 #define HL_INFO_HW_EVENTS      1
 #define HL_INFO_DRAM_USAGE     2
 #define HL_INFO_HW_IDLE                3
+#define HL_INFO_DEVICE_STATUS  4
 
 #define HL_INFO_VERSION_MAX_LEN        128
 
@@ -82,6 +89,11 @@ struct hl_info_hw_idle {
        __u32 pad;
 };
 
+struct hl_info_device_status {
+       __u32 status;
+       __u32 pad;
+};
+
 struct hl_info_args {
        /* Location of relevant struct in userspace */
        __u64 return_pointer;
@@ -181,7 +193,10 @@ struct hl_cs_in {
 };
 
 struct hl_cs_out {
-       /* this holds the sequence number of the CS to pass to wait ioctl */
+       /*
+        * seq holds the sequence number of the CS to pass to wait ioctl. All
+        * values are valid except for 0 and ULLONG_MAX
+        */
        __u64 seq;
        /* HL_CS_STATUS_* */
        __u32 status;
@@ -320,6 +335,107 @@ union hl_mem_args {
        struct hl_mem_out out;
 };
 
+#define HL_DEBUG_MAX_AUX_VALUES                10
+
+struct hl_debug_params_etr {
+       /* Address in memory to allocate buffer */
+       __u64 buffer_address;
+
+       /* Size of buffer to allocate */
+       __u64 buffer_size;
+
+       /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+       __u32 sink_mode;
+       __u32 pad;
+};
+
+struct hl_debug_params_etf {
+       /* Address in memory to allocate buffer */
+       __u64 buffer_address;
+
+       /* Size of buffer to allocate */
+       __u64 buffer_size;
+
+       /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+       __u32 sink_mode;
+       __u32 pad;
+};
+
+struct hl_debug_params_stm {
+       /* Two bit masks for HW event and Stimulus Port */
+       __u64 he_mask;
+       __u64 sp_mask;
+
+       /* Trace source ID */
+       __u32 id;
+
+       /* Frequency for the timestamp register */
+       __u32 frequency;
+};
+
+struct hl_debug_params_bmon {
+       /* Transaction address filter */
+       __u64 addr_range0;
+       __u64 addr_range1;
+
+       /* Capture window configuration */
+       __u32 bw_win;
+       __u32 win_capture;
+
+       /* Trace source ID */
+       __u32 id;
+       __u32 pad;
+};
+
+struct hl_debug_params_spmu {
+       /* Event types selection */
+       __u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
+
+       /* Number of event types selection */
+       __u32 event_types_num;
+       __u32 pad;
+};
+
+/* Opcode for ETR component */
+#define HL_DEBUG_OP_ETR                0
+/* Opcode for ETF component */
+#define HL_DEBUG_OP_ETF                1
+/* Opcode for STM component */
+#define HL_DEBUG_OP_STM                2
+/* Opcode for FUNNEL component */
+#define HL_DEBUG_OP_FUNNEL     3
+/* Opcode for BMON component */
+#define HL_DEBUG_OP_BMON       4
+/* Opcode for SPMU component */
+#define HL_DEBUG_OP_SPMU       5
+/* Opcode for timestamp */
+#define HL_DEBUG_OP_TIMESTAMP  6
+
+struct hl_debug_args {
+       /*
+        * Pointer to user input structure.
+        * This field is relevant to specific opcodes.
+        */
+       __u64 input_ptr;
+       /* Pointer to user output structure */
+       __u64 output_ptr;
+       /* Size of user input structure */
+       __u32 input_size;
+       /* Size of user output structure */
+       __u32 output_size;
+       /* HL_DEBUG_OP_* */
+       __u32 op;
+       /*
+        * Register index in the component, taken from the debug_regs_index enum
+        * in the various ASIC header files
+        */
+       __u32 reg_idx;
+       /* Enable/disable */
+       __u32 enable;
+       /* Context ID - Currently not in use */
+       __u32 ctx_id;
+};
+
 /*
  * Various information operations such as:
  * - H/W IP information
@@ -361,6 +477,12 @@ union hl_mem_args {
  * Each JOB will be enqueued on a specific queue, according to the user's input.
  * There can be more then one JOB per queue.
  *
+ * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
+ * a second set is for "execution" phase and a third set is for "store" phase.
+ * The JOBS on the "restore" phase are enqueued only after context-switch
+ * (or if it's the first CS for this context). The user can also order the
+ * driver to run the "restore" phase explicitly
+ *
  * There are two types of queues - external and internal. External queues
  * are DMA queues which transfer data from/to the Host. All other queues are
  * internal. The driver will get completion notifications from the device only
@@ -377,19 +499,18 @@ union hl_mem_args {
  * relevant queues. Therefore, the user mustn't assume the CS has been completed
  * or has even started to execute.
  *
- * Upon successful enqueue, the IOCTL returns an opaque handle which the user
+ * Upon successful enqueue, the IOCTL returns a sequence number which the user
  * can use with the "Wait for CS" IOCTL to check whether the handle's CS
  * external JOBS have been completed. Note that if the CS has internal JOBS
  * which can execute AFTER the external JOBS have finished, the driver might
  * report that the CS has finished executing BEFORE the internal JOBS have
  * actually finish executing.
  *
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
- * The JOBS on the "restore" phase are enqueued only after context-switch
- * (or if its the first CS for this context). The user can also order the
- * driver to run the "restore" phase explicitly
- *
+ * Even though the sequence number increments per CS, the user can NOT
+ * automatically assume that if CS with sequence number N finished, then CS
+ * with sequence number N-1 also finished. The user can make this assumption if
+ * and only if CS N and CS N-1 are exactly the same (same CBs for the same
+ * queues).
  */
 #define HL_IOCTL_CS                    \
                _IOWR('H', 0x03, union hl_cs_args)
@@ -444,7 +565,20 @@ union hl_mem_args {
 #define HL_IOCTL_MEMORY                \
                _IOWR('H', 0x05, union hl_mem_args)
 
+/*
+ * Debug
+ * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
+ *
+ * This IOCTL allows the user to get debug traces from the chip.
+ *
+ * The user needs to provide the register index and essential data such as
+ * buffer address and size.
+ *
+ */
+#define HL_IOCTL_DEBUG         \
+               _IOWR('H', 0x06, struct hl_debug_args)
+
 #define HL_COMMAND_START       0x01
-#define HL_COMMAND_END         0x06
+#define HL_COMMAND_END         0x07
 
 #endif /* HABANALABS_H_ */
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 598e278b46f743d777e6f9375ac1c932896ef99c..7d4025d665eb95ee439ddb4e5e564aff8fa09c24 100644 (file)
@@ -582,6 +582,8 @@ asmlinkage __visible void __init start_kernel(void)
        page_alloc_init();
 
        pr_notice("Kernel command line: %s\n", boot_command_line);
+       /* parameters may set static keys */
+       jump_label_init();
        parse_early_param();
        after_dashes = parse_args("Booting kernel",
                                  static_command_line, __start___param,
@@ -591,8 +593,6 @@ asmlinkage __visible void __init start_kernel(void)
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           NULL, set_init_arg);
 
-       jump_label_init();
-
        /*
         * These use large bootmem allocations and must precede
         * kmem_cache_init()
index 8974b3755670e37b0540f3d48ee0b29da951a341..3c18260403dde1df951448c600b6ef9ac61f5635 100644 (file)
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
                                         struct xdp_frame *xdpf)
 {
+       unsigned int hard_start_headroom;
        unsigned int frame_size;
        void *pkt_data_start;
        struct sk_buff *skb;
 
+       /* Part of headroom was reserved to xdpf */
+       hard_start_headroom = sizeof(struct xdp_frame) +  xdpf->headroom;
+
        /* build_skb need to place skb_shared_info after SKB end, and
         * also want to know the memory "truesize".  Thus, need to
         * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * is not at a fixed memory location, with mixed length
         * packets, which is bad for cache-line hotness.
         */
-       frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+       frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       pkt_data_start = xdpf->data - xdpf->headroom;
+       pkt_data_start = xdpf->data - hard_start_headroom;
        skb = build_skb(pkt_data_start, frame_size);
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, xdpf->headroom);
+       skb_reserve(skb, hard_start_headroom);
        __skb_put(skb, xdpf->len);
        if (xdpf->metasize)
                skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * - RX ring dev queue index    (skb_record_rx_queue)
         */
 
+       /* Allow SKB to reuse area used by xdp_frame */
+       xdp_scrub_frame(xdpf);
+
        return skb;
 }
 
index 2ada5e21dfa62175d6cf9667ed4636e4c4ada659..4a8f390a2b821db8cff26f9716952b36013ab152 100644 (file)
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
 }
 EXPORT_SYMBOL(bpf_prog_get_type_path);
 
-static void bpf_evict_inode(struct inode *inode)
-{
-       enum bpf_type type;
-
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
-
-       if (S_ISLNK(inode->i_mode))
-               kfree(inode->i_link);
-       if (!bpf_inode_type(inode, &type))
-               bpf_any_put(inode->i_private, type);
-}
-
 /*
  * Display the mount options in /proc/mounts.
  */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+       struct inode *inode = container_of(head, struct inode, i_rcu);
+       enum bpf_type type;
+
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_link);
+       if (!bpf_inode_type(inode, &type))
+               bpf_any_put(inode->i_private, type);
+       free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
 static const struct super_operations bpf_super_ops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
        .show_options   = bpf_show_options,
-       .evict_inode    = bpf_evict_inode,
+       .destroy_inode  = bpf_destroy_inode,
 };
 
 enum {
index fd502c1f71eb003e5975ec58e33ccc8f8e1c0586..6c5a41f7f33856d79f641c57767c7c093ec2a831 100644 (file)
@@ -1897,8 +1897,9 @@ continue_func:
                }
                frame++;
                if (frame >= MAX_CALL_FRAMES) {
-                       WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-                       return -EFAULT;
+                       verbose(env, "the call stack of %d frames is too deep !\n",
+                               frame);
+                       return -E2BIG;
                }
                goto process_func;
        }
index 45d51e8e26f62f27b8b91c3e53ddb2df20fd791b..a218e43cc38258ae6d7bed19f0d6e2ea852e2ac7 100644 (file)
@@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 #ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
-       entry->stacktrace.skip = 2;
+       entry->stacktrace.skip = 1;
        save_stack_trace(&entry->stacktrace);
 #endif
 
index 72d06e302e9938dcaee6dbf57324d3ed31397035..dc7dead2d2cc2cfcd0448643650a878b90653a21 100644 (file)
@@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event,
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
+       if (READ_ONCE(event->pending_disable) >= 0) {
+               WRITE_ONCE(event->pending_disable, -1);
                state = PERF_EVENT_STATE_OFF;
        }
        perf_event_set_state(event, state);
@@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-       event->pending_disable = 1;
+       WRITE_ONCE(event->pending_disable, smp_processor_id());
+       /* can fail, see perf_pending_event_disable() */
        irq_work_queue(&event->pending);
 }
 
@@ -5810,10 +5811,45 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
+static void perf_pending_event_disable(struct perf_event *event)
+{
+       int cpu = READ_ONCE(event->pending_disable);
+
+       if (cpu < 0)
+               return;
+
+       if (cpu == smp_processor_id()) {
+               WRITE_ONCE(event->pending_disable, -1);
+               perf_event_disable_local(event);
+               return;
+       }
+
+       /*
+        *  CPU-A                       CPU-B
+        *
+        *  perf_event_disable_inatomic()
+        *    @pending_disable = CPU-A;
+        *    irq_work_queue();
+        *
+        *  sched-out
+        *    @pending_disable = -1;
+        *
+        *                              sched-in
+        *                              perf_event_disable_inatomic()
+        *                                @pending_disable = CPU-B;
+        *                                irq_work_queue(); // FAILS
+        *
+        *  irq_work_run()
+        *    perf_pending_event()
+        *
+        * But the event runs on CPU-B and wants disabling there.
+        */
+       irq_work_queue_on(&event->pending, cpu);
+}
+
 static void perf_pending_event(struct irq_work *entry)
 {
-       struct perf_event *event = container_of(entry,
-                       struct perf_event, pending);
+       struct perf_event *event = container_of(entry, struct perf_event, pending);
        int rctx;
 
        rctx = perf_swevent_get_recursion_context();
@@ -5822,10 +5858,7 @@ static void perf_pending_event(struct irq_work *entry)
         * and we won't recurse 'further'.
         */
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
-               perf_event_disable_local(event);
-       }
+       perf_pending_event_disable(event);
 
        if (event->pending_wakeup) {
                event->pending_wakeup = 0;
@@ -9044,26 +9077,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        if (task == TASK_TOMBSTONE)
                return;
 
-       if (!ifh->nr_file_filters)
-               return;
-
-       mm = get_task_mm(event->ctx->task);
-       if (!mm)
-               goto restart;
+       if (ifh->nr_file_filters) {
+               mm = get_task_mm(event->ctx->task);
+               if (!mm)
+                       goto restart;
 
-       down_read(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
+       }
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               event->addr_filter_ranges[count].start = 0;
-               event->addr_filter_ranges[count].size = 0;
+               if (filter->path.dentry) {
+                       /*
+                        * Adjust base offset if the filter is associated to a
+                        * binary that needs to be mapped:
+                        */
+                       event->addr_filter_ranges[count].start = 0;
+                       event->addr_filter_ranges[count].size = 0;
 
-               /*
-                * Adjust base offset if the filter is associated to a binary
-                * that needs to be mapped:
-                */
-               if (filter->path.dentry)
                        perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
+               } else {
+                       event->addr_filter_ranges[count].start = filter->offset;
+                       event->addr_filter_ranges[count].size  = filter->size;
+               }
 
                count++;
        }
@@ -9071,9 +9107,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        event->addr_filters_gen++;
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
-       up_read(&mm->mmap_sem);
+       if (ifh->nr_file_filters) {
+               up_read(&mm->mmap_sem);
 
-       mmput(mm);
+               mmput(mm);
+       }
 
 restart:
        perf_event_stop(event, 1);
@@ -10236,6 +10274,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
        init_waitqueue_head(&event->waitq);
+       event->pending_disable = -1;
        init_irq_work(&event->pending, perf_pending_event);
 
        mutex_init(&event->mmap_mutex);
index a4047321d7d8052b40302d4ed8c0aa8e649ba759..5eedb49a65ea2c8f6e7ea8db916b1320806df0c9 100644 (file)
@@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
                 * store that will be enabled on successful return
                 */
                if (!handle->size) { /* A, matches D */
-                       event->pending_disable = 1;
+                       event->pending_disable = smp_processor_id();
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
@@ -455,24 +455,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
                rb->aux_head += size;
        }
 
-       if (size || handle->aux_flags) {
-               /*
-                * Only send RECORD_AUX if we have something useful to communicate
-                *
-                * Note: the OVERWRITE records by themselves are not considered
-                * useful, as they don't communicate any *new* information,
-                * aside from the short-lived offset, that becomes history at
-                * the next event sched-in and therefore isn't useful.
-                * The userspace that needs to copy out AUX data in overwrite
-                * mode should know to use user_page::aux_head for the actual
-                * offset. So, from now on we don't output AUX records that
-                * have *only* OVERWRITE flag set.
-                */
-
-               if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
-                       perf_event_aux_event(handle->event, aux_head, size,
-                                            handle->aux_flags);
-       }
+       /*
+        * Only send RECORD_AUX if we have something useful to communicate
+        *
+        * Note: the OVERWRITE records by themselves are not considered
+        * useful, as they don't communicate any *new* information,
+        * aside from the short-lived offset, that becomes history at
+        * the next event sched-in and therefore isn't useful.
+        * The userspace that needs to copy out AUX data in overwrite
+        * mode should know to use user_page::aux_head for the actual
+        * offset. So, from now on we don't output AUX records that
+        * have *only* OVERWRITE flag set.
+        */
+       if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
+               perf_event_aux_event(handle->event, aux_head, size,
+                                    handle->aux_flags);
 
        rb->user_page->aux_head = rb->aux_head;
        if (rb_need_aux_wakeup(rb))
@@ -480,7 +477,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 
        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-                       handle->event->pending_disable = 1;
+                       handle->event->pending_disable = smp_processor_id();
                perf_output_wakeup(handle);
        }
 
index 3faef4a77f7103e004c6a26f9c4074750f23fc73..51128bea3846ca1c15cd622f0889602cd1688b78 100644 (file)
@@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
        data = data->parent_data;
+
+       if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+               return 0;
+
        if (data->chip->irq_set_wake)
                return data->chip->irq_set_wake(data, on);
 
index 13539e12cd8034279c8f324242e4996301203d1a..9f8a709337cf802f2ddbf33f90a956d851f0c5b3 100644 (file)
@@ -558,6 +558,7 @@ int __init early_irq_init(void)
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               mutex_init(&desc[i].request_mutex);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
index c83e547271312e9f32ed5447fdb9eef1b8e2c2e7..b1ea30a5540e9e1af5f1c86e5590195b6f00bc9d 100644 (file)
@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
-       int ret;
 
        /*
         * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       ret = kprobe_optready(ap);
-       if (ret)
-               return ret;
+       if (!kprobe_optready(ap))
+               return -EINVAL;
 
        optimize_kprobe(ap);
        return 0;
index 34cdcbedda492b84cb610af67cb11113ea04065d..e221be724fe82f0dbf7dc0426041b3ab6e5f01cf 100644 (file)
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
 
        /* closed head */
        pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
         */
        call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
        struct pending_free *pf;
        unsigned long flags;
-       int locked;
 
        init_data_structures_once();
 
        raw_local_irq_save(flags);
-       locked = graph_lock();
-       if (!locked)
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
        call_rcu_zapped(pf);
-
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /*
index 6a73e41a20160bf760e09ca050a82657ccf95347..43901fa3f26932d334f34f4b474a3cb55821513a 100644 (file)
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
        if (dl_entity_is_special(dl_se))
                return;
 
-       WARN_ON(hrtimer_active(&dl_se->inactive_timer));
        WARN_ON(dl_se->dl_non_contending);
 
        zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
-       if (zerolag_time < 0) {
+       if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
index fdab7eb6f3517af0ca9581a730b0771c28092fa0..a4d9e14bf13891482bb717d03946699fa04958b1 100644 (file)
@@ -4885,6 +4885,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
@@ -4892,6 +4894,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
        unsigned long flags;
        int overrun;
        int idle = 0;
+       int count = 0;
 
        raw_spin_lock_irqsave(&cfs_b->lock, flags);
        for (;;) {
@@ -4899,6 +4902,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
                if (!overrun)
                        break;
 
+               if (++count > 3) {
+                       u64 new, old = ktime_to_ns(cfs_b->period);
+
+                       new = (old * 147) / 128; /* ~115% */
+                       new = min(new, max_cfs_quota_period);
+
+                       cfs_b->period = ns_to_ktime(new);
+
+                       /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+                       cfs_b->quota *= new;
+                       cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+                       pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+                               smp_processor_id(),
+                               div_u64(new, NSEC_PER_USEC),
+                               div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+                       /* reset count so we don't come right back in here */
+                       count = 0;
+               }
+
                idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
        }
        if (idle)
@@ -7784,10 +7809,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        if (cfs_rq->last_h_load_update == now)
                return;
 
-       cfs_rq->h_load_next = NULL;
+       WRITE_ONCE(cfs_rq->h_load_next, NULL);
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               cfs_rq->h_load_next = se;
+               WRITE_ONCE(cfs_rq->h_load_next, se);
                if (cfs_rq->last_h_load_update == now)
                        break;
        }
@@ -7797,7 +7822,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
                cfs_rq->last_h_load_update = now;
        }
 
-       while ((se = cfs_rq->h_load_next) != NULL) {
+       while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
                load = cfs_rq->h_load;
                load = div64_ul(load * se->avg.load_avg,
                        cfs_rq_load_avg(cfs_rq) + 1);
index 54a0347ca8128f09cdbbcc83e2e8f8eea633a7ab..df27e499956a1a5a816fd31081c15292cdeb6777 100644 (file)
@@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 
        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch();
-       syscall_get_arguments(task, regs, 0, 6, args);
+       syscall_get_arguments(task, regs, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
index b7953934aa994e7993254aa6b04438815ed37f1f..227ba170298e5b457c9b405c5376c466fe26850b 100644 (file)
@@ -3581,7 +3581,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
        if (flags)
                return -EINVAL;
 
-       f = fdget_raw(pidfd);
+       f = fdget(pidfd);
        if (!f.file)
                return -EBADF;
 
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
                if (unlikely(sig != kinfo.si_signo))
                        goto err;
 
+               /* Only allow sending arbitrary signals to yourself. */
+               ret = -EPERM;
                if ((task_pid(current) != pid) &&
-                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
-                       /* Only allow sending arbitrary signals to yourself. */
-                       ret = -EPERM;
-                       if (kinfo.si_code != SI_USER)
-                               goto err;
-
-                       /* Turn this into a regular kill signal. */
-                       prepare_kill_siginfo(sig, &kinfo);
-               }
+                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+                       goto err;
        } else {
                prepare_kill_siginfo(sig, &kinfo);
        }
index e5da394d1ca3675ef6bc050660c1d5a0892915ca..c9ec050bcf46126286dba7d122b3a7240de6de65 100644 (file)
@@ -128,6 +128,7 @@ static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static int __maybe_unused four = 4;
+static unsigned long zero_ul;
 static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
@@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = {
                .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &zero_ul,
                .extra2         = &long_max,
        },
        {
index 2c97e8c2d29fb3351332e447323750247d950f58..0519a8805aab3f290e3fb437ff0903a0e4722d90 100644 (file)
@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
 {
        struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-       return ktime_sub(now, alarm->node.expires);
+       return ktime_sub(alarm->node.expires, now);
 }
 
 /**
index 094b82ca95e524149d201ed51ae938bbaef7eedd..930113b9799acb85c3c83263a56129b06a8d5ed6 100644 (file)
@@ -272,7 +272,7 @@ static u64 notrace suspended_sched_clock_read(void)
        return cd.read_data[seq & 1].epoch_cyc;
 }
 
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
@@ -283,7 +283,7 @@ static int sched_clock_suspend(void)
        return 0;
 }
 
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
index 529143b4c8d2a5212ded80e543d74b2cda8b8744..df401463a19131dc71b1e395577907c80f7f233c 100644 (file)
@@ -487,6 +487,7 @@ void tick_freeze(void)
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), true);
                system_state = SYSTEM_SUSPEND;
+               sched_clock_suspend();
                timekeeping_suspend();
        } else {
                tick_suspend_local();
@@ -510,6 +511,7 @@ void tick_unfreeze(void)
 
        if (tick_freeze_depth == num_online_cpus()) {
                timekeeping_resume();
+               sched_clock_resume();
                system_state = SYSTEM_RUNNING;
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), false);
index 7a9b4eb7a1d5bde85e7a7b1c7747602cdd605975..141ab3ab0354f39fdb5daf7274e8d061d90a556c 100644 (file)
@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
 extern void timekeeping_warp_clock(void);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
+#else
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
+#endif
 
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
index 26c8ca9bd06b6725b84f42d6b635c0d256118ebb..b920358dd8f7f8cfcd226ba046e786901699c53a 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -6246,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
        tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6306,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6337,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
index 21153e64bf1c366033213e90272438ba171b2822..6c24755655c752a3bf9f4bb914ddb251d9ab0d2e 100644 (file)
@@ -7041,12 +7041,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
        buf->private = 0;
 }
 
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
+       if (ref->ref > INT_MAX/2)
+               return false;
+
        ref->ref++;
+       return true;
 }
 
 /* Pipe buffer operations for a buffer. */
index f93a56d2db275be64df083344b68ec65f3c32473..fa8fbff736d684734e89f05fb149c4e83436d8f4 100644 (file)
@@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        unsigned long irq_flags;
+       unsigned long args[6];
        int pc;
        int syscall_nr;
        int size;
@@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
@@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
+       unsigned long args[6];
        bool valid_prog_array;
        int syscall_nr;
        int rctx;
@@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                return;
 
        rec->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args,
-                              (unsigned long *)&rec->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        if ((valid_prog_array &&
             !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
index 71381168dedef4e88382a1849412f554a4cb4a56..247bf0b1582ca1cf352f006aa1fd5f689f8f5859 100644 (file)
@@ -135,7 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
-               pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+               pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
+                        this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
index 0d9e81779e373745c3e28497df424b415a50c3ac..00dbcdbc9a0d3fcac8b5c400b5f1a2eafd49c404 100644 (file)
@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
        bool
        help
-         KCOV does not have any arch-specific code, but currently it is enabled
-         only for x86_64. KCOV requires testing on other archs, and most likely
-         disabling of instrumentation for some early boot code.
+         An architecture should select this when it can successfully
+         build and run with CONFIG_KCOV. This typically requires
+         disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
        def_bool $(cc-option,-fsanitize-coverage=trace-pc)
index ea36dc355da131b4a45b71d8be6f1bc69a53e637..b396d328a7643b7c1984b8e87df9da88c45e7470 100644 (file)
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
        struct ahash_request *hash = hashp;
        struct scatterlist sg;
        size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
        ahash_request_set_crypt(hash, &sg, NULL, copied);
        crypto_ahash_update(hash);
        return copied;
+#else
+       return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
 
index 4525fb09484427297853ca5819dcfaffee9265f1..a8ede77afe0db70fa7992319c470c2a65c07bf58 100644 (file)
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
        const unsigned char *ip = in;
        unsigned char *op = out;
+       unsigned char *data_start;
        size_t l = in_len;
        size_t t = 0;
        signed char state_offset = -2;
        unsigned int m4_max_offset;
 
-       // LZO v0 will never write 17 as first byte,
-       // so this is used to version the bitstream
+       // LZO v0 will never write 17 as first byte (except for zero-length
+       // input), so this is used to version the bitstream
        if (bitstream_version > 0) {
                *op++ = 17;
                *op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
                m4_max_offset = M4_MAX_OFFSET_V0;
        }
 
+       data_start = op;
+
        while (l > 20) {
                size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
                uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
        if (t > 0) {
                const unsigned char *ii = in + in_len - t;
 
-               if (op == out && t <= 238) {
+               if (op == data_start && t <= 238) {
                        *op++ = (17 + t);
                } else if (t <= 3) {
                        op[state_offset] |= t;
index 6d2600ea3b5547efa35ae1572e83fc56f0a325ad..9e07e9ef1aad7e7f8b0044f6bf954ff2f4ae9099 100644 (file)
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
        if (unlikely(in_len < 3))
                goto input_overrun;
 
-       if (likely(*ip == 17)) {
+       if (likely(in_len >= 5) && likely(*ip == 17)) {
                bitstream_version = ip[1];
                ip += 2;
-               if (unlikely(in_len < 5))
-                       goto input_overrun;
        } else {
                bitstream_version = 0;
        }
index 38e4ca08e757cbb9bfa7b551c86062fa252a2d94..3ab861c1a857ad1d75bfdd59ff61dc51d1c7350d 100644 (file)
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+       return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
index 1a7077f20eae4079a25aae3c4d6703edaffd8d65..fb328e7ccb0893136e949f3581cc8b647f414de2 100644 (file)
@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-                          unsigned long args[6], unsigned int maxargs,
-                          unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
        struct pt_regs *regs;
 
        if (!try_get_task_stack(target)) {
                /* Task has no stack, so the task isn't in a syscall. */
-               *sp = *pc = 0;
-               *callno = -1;
+               memset(info, 0, sizeof(*info));
+               info->data.nr = -1;
                return 0;
        }
 
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
                return -EAGAIN;
        }
 
-       *sp = user_stack_pointer(regs);
-       *pc = instruction_pointer(regs);
+       info->sp = user_stack_pointer(regs);
+       info->data.instruction_pointer = instruction_pointer(regs);
 
-       *callno = syscall_get_nr(target, regs);
-       if (*callno != -1L && maxargs > 0)
-               syscall_get_arguments(target, regs, 0, maxargs, args);
+       info->data.nr = syscall_get_nr(target, regs);
+       if (info->data.nr != -1L)
+               syscall_get_arguments(target, regs,
+                                     (unsigned long *)&info->data.args[0]);
 
        put_task_stack(target);
        return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:            thread to examine
- * @callno:            filled with system call number or -1
- * @args:              filled with @maxargs system call arguments
- * @maxargs:           number of elements in @args to fill
- * @sp:                        filled with user stack pointer
- * @pc:                        filled with user PC
+ * @info:              structure with the following fields:
+ *                      .sp        - filled with user stack pointer
+ *                      .data.nr   - filled with system call number or -1
+ *                      .data.args - filled with @maxargs system call arguments
+ *                      .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress.  Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
  *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with *@info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-                        unsigned long args[6], unsigned int maxargs,
-                        unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
        long state;
        unsigned long ncsw;
 
-       if (unlikely(maxargs > 6))
-               return -EINVAL;
-
        if (target == current)
-               return collect_syscall(target, callno, args, maxargs, sp, pc);
+               return collect_syscall(target, info);
 
        state = target->state;
        if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
        ncsw = wait_task_inactive(target, state);
        if (unlikely(!ncsw) ||
-           unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+           unlikely(collect_syscall(target, info)) ||
            unlikely(wait_task_inactive(target, state) != ncsw))
                return -EAGAIN;
 
index f171a83707ced436bb2bd4508060a6cd45a95905..3319e0872d014628a6e505fc80d9daeb8d8a2b47 100644 (file)
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
                                                        bool check_target)
 {
        struct page *page = pfn_to_online_page(pfn);
+       struct page *block_page;
        struct page *end_page;
        unsigned long block_pfn;
 
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
            get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
                return false;
 
+       /* Ensure the start of the pageblock or zone is online and valid */
+       block_pfn = pageblock_start_pfn(pfn);
+       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       if (block_page) {
+               page = block_page;
+               pfn = block_pfn;
+       }
+
+       /* Ensure the end of the pageblock or zone is online and valid */
+       block_pfn += pageblock_nr_pages;
+       block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+       end_page = pfn_to_online_page(block_pfn);
+       if (!end_page)
+               return false;
+
        /*
         * Only clear the hint if a sample indicates there is either a
         * free page or an LRU page in the block. One or other condition
         * is necessary for the block to be a migration source/target.
         */
-       block_pfn = pageblock_start_pfn(pfn);
-       pfn = max(block_pfn, zone->zone_start_pfn);
-       page = pfn_to_page(pfn);
-       if (zone != page_zone(page))
-               return false;
-       pfn = block_pfn + pageblock_nr_pages;
-       pfn = min(pfn, zone_end_pfn(zone));
-       end_page = pfn_to_page(pfn);
-
        do {
                if (pfn_valid_within(pfn)) {
                        if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 static void __reset_isolation_suitable(struct zone *zone)
 {
        unsigned long migrate_pfn = zone->zone_start_pfn;
-       unsigned long free_pfn = zone_end_pfn(zone);
+       unsigned long free_pfn = zone_end_pfn(zone) - 1;
        unsigned long reset_migrate = free_pfn;
        unsigned long reset_free = migrate_pfn;
        bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
                                count_compact_events(COMPACTISOLATED, nr_isolated);
                        } else {
                                /* If isolation fails, abort the search */
-                               order = -1;
+                               order = cc->search_order + 1;
                                page = NULL;
                        }
                }
index f84e22685aaaaa7ff1167697af36a16960171a7d..91819b8ad9cc511ca15a3d84ff81131cd4e2d0da 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -160,8 +160,12 @@ retry:
                goto retry;
        }
 
-       if (flags & FOLL_GET)
-               get_page(page);
+       if (flags & FOLL_GET) {
+               if (unlikely(!try_get_page(page))) {
+                       page = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
+       }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
@@ -298,7 +302,10 @@ retry_locked:
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
-                       get_page(page);
+                       if (unlikely(!try_get_page(page))) {
+                               spin_unlock(ptl);
+                               return ERR_PTR(-ENOMEM);
+                       }
                        spin_unlock(ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
@@ -500,7 +507,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                if (is_device_public_page(*page))
                        goto unmap;
        }
-       get_page(*page);
+       if (unlikely(!try_get_page(*page))) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
 out:
        ret = 0;
 unmap:
@@ -1545,6 +1555,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
        }
 }
 
+/*
+ * Return the compund head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+       struct page *head = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(head) < 0))
+               return NULL;
+       if (unlikely(!page_cache_add_speculative(head, refs)))
+               return NULL;
+       return head;
+}
+
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1579,9 +1603,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               head = compound_head(page);
 
-               if (!page_cache_get_speculative(head))
+               head = try_get_compound_head(page, 1);
+               if (!head)
                        goto pte_unmap;
 
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1720,8 +1744,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pmd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pmd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1758,8 +1782,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pud_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pud_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1795,8 +1819,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pgd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pgd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
index 404acdcd0455d0d3dda191d994dfb27d0359104e..165ea46bf14926a4ae1ee664631475e0150f185f 100644 (file)
@@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pmd_lock(mm, pmd);
+       if (!pmd_none(*pmd)) {
+               if (write) {
+                       if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+                               goto out_unlock;
+                       }
+                       entry = pmd_mkyoung(*pmd);
+                       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+                       if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+                               update_mmu_cache_pmd(vma, addr, pmd);
+               }
+
+               goto out_unlock;
+       }
+
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
@@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
+               pgtable = NULL;
        }
 
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
        spin_unlock(ptl);
+       if (pgtable)
+               pte_free(mm, pgtable);
 }
 
 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pud_lock(mm, pud);
+       if (!pud_none(*pud)) {
+               if (write) {
+                       if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+                               goto out_unlock;
+                       }
+                       entry = pud_mkyoung(*pud);
+                       entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+                       if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+                               update_mmu_cache_pud(vma, addr, pud);
+               }
+               goto out_unlock;
+       }
+
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
@@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
        spin_unlock(ptl);
 }
 
index 97b1e0290c66d48737cda50ccea6bbcc1782c8fc..6cdc7b2d910039a5e9f4fb4724c34ad8e2216c45 100644 (file)
@@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
+
+               /*
+                * Instead of doing 'try_get_page()' below in the same_page
+                * loop, just check the count once here.
+                */
+               if (unlikely(page_count(page) <= 0)) {
+                       if (pages) {
+                               spin_unlock(ptl);
+                               remainder = 0;
+                               err = -ENOMEM;
+                               break;
+                       }
+               }
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
index 707fa5579f66f1e1e96a5613e50ff74b92417954..2e435b8142e51ac9237110b451b750a6d4423fcc 100644 (file)
@@ -1401,6 +1401,7 @@ static void scan_block(void *_start, void *_end,
 /*
  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
  */
+#ifdef CONFIG_SMP
 static void scan_large_block(void *start, void *end)
 {
        void *next;
@@ -1412,6 +1413,7 @@ static void scan_large_block(void *start, void *end)
                cond_resched();
        }
 }
+#endif
 
 /*
  * Scan a memory block corresponding to a kmemleak_object. A condition is
@@ -1529,11 +1531,6 @@ static void kmemleak_scan(void)
        }
        rcu_read_unlock();
 
-       /* data/bss scanning */
-       scan_large_block(_sdata, _edata);
-       scan_large_block(__bss_start, __bss_stop);
-       scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
@@ -2071,6 +2068,17 @@ void __init kmemleak_init(void)
        }
        local_irq_restore(flags);
 
+       /* register the data/bss sections */
+       create_object((unsigned long)_sdata, _edata - _sdata,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       /* only register .data..ro_after_init if not within .data */
+       if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+               create_object((unsigned long)__start_ro_after_init,
+                             __end_ro_after_init - __start_ro_after_init,
+                             KMEMLEAK_GREY, GFP_ATOMIC);
+
        /*
         * This is the point where tracking allocations is safe. Automatic
         * scanning is started during the late initcall. Add the early logged
index 532e0e2a4817e36d9106634c85a95136790f5227..81a0d3914ec999efcb36fb590e75c29d059d2b24 100644 (file)
@@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
        return &memcg->cgwb_domain;
 }
 
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page().
+ */
+static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
+{
+       long x = atomic_long_read(&memcg->stat[idx]);
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+       if (x < 0)
+               x = 0;
+       return x;
+}
+
 /**
  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
  * @wb: bdi_writeback in question
@@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
        struct mem_cgroup *parent;
 
-       *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
+       *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
 
        /* this should eventually include NR_UNSTABLE_NFS */
-       *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
+       *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
        *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
                                                     (1 << LRU_ACTIVE_FILE));
        *pheadroom = PAGE_COUNTER_MAX;
index 41eb48d9b5276733e48b95f1addfcb228becd993..bd7b9f293b391f22b85810e48bc7c0679b217f05 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2525,7 +2526,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       /* don't alter vm_end if the coredump is running */
+       if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2551,6 +2553,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
+       /* don't alter vm_start if the coredump is running */
+       if (!mmget_still_valid(mm))
+               return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
index d96ca5bc555bbc432e135c876151e0699ee88162..c6ce20aaf80bbb8e1b306eed27e53c916296eda2 100644 (file)
@@ -8005,7 +8005,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         int migratetype, int flags)
 {
-       unsigned long pfn, iter, found;
+       unsigned long found;
+       unsigned long iter = 0;
+       unsigned long pfn = page_to_pfn(page);
+       const char *reason = "unmovable page";
 
        /*
         * TODO we could make this much more efficient by not checking every
@@ -8015,17 +8018,20 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
         * can still lead to having bootmem allocations in zone_movable.
         */
 
-       /*
-        * CMA allocations (alloc_contig_range) really need to mark isolate
-        * CMA pageblocks even when they are not movable in fact so consider
-        * them movable here.
-        */
-       if (is_migrate_cma(migratetype) &&
-                       is_migrate_cma(get_pageblock_migratetype(page)))
-               return false;
+       if (is_migrate_cma_page(page)) {
+               /*
+                * CMA allocations (alloc_contig_range) really need to mark
+                * isolate CMA pageblocks even when they are not movable in fact
+                * so consider them movable here.
+                */
+               if (is_migrate_cma(migratetype))
+                       return false;
+
+               reason = "CMA page";
+               goto unmovable;
+       }
 
-       pfn = page_to_pfn(page);
-       for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+       for (found = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;
 
                if (!pfn_valid_within(check))
@@ -8105,7 +8111,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 unmovable:
        WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
        if (flags & REPORT_FAILURE)
-               dump_page(pfn_to_page(pfn+iter), "unmovable page");
+               dump_page(pfn_to_page(pfn + iter), reason);
        return true;
 }
 
index 2e6fc8d552c96d58f615be2fd3addadefd01f5c0..68dd2e7e73b5f29b2d3dfd2bd9e4b984244d7dc9 100644 (file)
@@ -2567,8 +2567,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                ai->groups[group].base_offset = areas[group] - base;
        }
 
-       pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-               PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+       pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+               PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
                ai->dyn_size, ai->unit_size);
 
        rc = pcpu_setup_first_chunk(ai, base);
@@ -2692,8 +2692,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        }
 
        /* we're ready, commit */
-       pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
-               unit_pages, psize_str, vm.addr, ai->static_size,
+       pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+               unit_pages, psize_str, ai->static_size,
                ai->reserved_size, ai->dyn_size);
 
        rc = pcpu_setup_first_chunk(ai, vm.addr);
index b3db3779a30a1f1fbe2d5cda71ddc402601926ef..2275a0ff7c3051d9674ee59b153451c6078741c3 100644 (file)
@@ -1081,9 +1081,14 @@ static void shmem_evict_inode(struct inode *inode)
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
-               if (!list_empty(&info->swaplist)) {
+               while (!list_empty(&info->swaplist)) {
+                       /* Wait while shmem_unuse() is scanning this inode... */
+                       wait_var_event(&info->stop_eviction,
+                                      !atomic_read(&info->stop_eviction));
                        mutex_lock(&shmem_swaplist_mutex);
-                       list_del_init(&info->swaplist);
+                       /* ...but beware of the race if we peeked too early */
+                       if (!atomic_read(&info->stop_eviction))
+                               list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }
@@ -1099,10 +1104,11 @@ extern struct swap_info_struct *swap_info[];
 static int shmem_find_swap_entries(struct address_space *mapping,
                                   pgoff_t start, unsigned int nr_entries,
                                   struct page **entries, pgoff_t *indices,
-                                  bool frontswap)
+                                  unsigned int type, bool frontswap)
 {
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
+       swp_entry_t entry;
        unsigned int ret = 0;
 
        if (!nr_entries)
@@ -1116,13 +1122,12 @@ static int shmem_find_swap_entries(struct address_space *mapping,
                if (!xa_is_value(page))
                        continue;
 
-               if (frontswap) {
-                       swp_entry_t entry = radix_to_swp_entry(page);
-
-                       if (!frontswap_test(swap_info[swp_type(entry)],
-                                           swp_offset(entry)))
-                               continue;
-               }
+               entry = radix_to_swp_entry(page);
+               if (swp_type(entry) != type)
+                       continue;
+               if (frontswap &&
+                   !frontswap_test(swap_info[type], swp_offset(entry)))
+                       continue;
 
                indices[ret] = xas.xa_index;
                entries[ret] = page;
@@ -1194,7 +1199,7 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type,
 
                pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
                                                  pvec.pages, indices,
-                                                 frontswap);
+                                                 type, frontswap);
                if (pvec.nr == 0) {
                        ret = 0;
                        break;
@@ -1227,36 +1232,27 @@ int shmem_unuse(unsigned int type, bool frontswap,
                unsigned long *fs_pages_to_unuse)
 {
        struct shmem_inode_info *info, *next;
-       struct inode *inode;
-       struct inode *prev_inode = NULL;
        int error = 0;
 
        if (list_empty(&shmem_swaplist))
                return 0;
 
        mutex_lock(&shmem_swaplist_mutex);
-
-       /*
-        * The extra refcount on the inode is necessary to safely dereference
-        * p->next after re-acquiring the lock. New shmem inodes with swap
-        * get added to the end of the list and we will scan them all.
-        */
        list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
                if (!info->swapped) {
                        list_del_init(&info->swaplist);
                        continue;
                }
-
-               inode = igrab(&info->vfs_inode);
-               if (!inode)
-                       continue;
-
+               /*
+                * Drop the swaplist mutex while searching the inode for swap;
+                * but before doing so, make sure shmem_evict_inode() will not
+                * remove placeholder inode from swaplist, nor let it be freed
+                * (igrab() would protect from unlink, but not from unmount).
+                */
+               atomic_inc(&info->stop_eviction);
                mutex_unlock(&shmem_swaplist_mutex);
-               if (prev_inode)
-                       iput(prev_inode);
-               prev_inode = inode;
 
-               error = shmem_unuse_inode(inode, type, frontswap,
+               error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
                                          fs_pages_to_unuse);
                cond_resched();
 
@@ -1264,14 +1260,13 @@ int shmem_unuse(unsigned int type, bool frontswap,
                next = list_next_entry(info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
+               if (atomic_dec_and_test(&info->stop_eviction))
+                       wake_up_var(&info->stop_eviction);
                if (error)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (prev_inode)
-               iput(prev_inode);
-
        return error;
 }
 
@@ -2238,6 +2233,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
+               atomic_set(&info->stop_eviction, 0);
                info->seals = F_SEAL_SEAL;
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->shrinklist);
index 329bfe67f2cae966f07d930be7e10e2041505acb..9142ee99249327f22224b32c5805002ec4615dd3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2374,7 +2374,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                /* Slab management obj is off-slab. */
                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                              local_flags, nodeid);
-               freelist = kasan_reset_tag(freelist);
                if (!freelist)
                        return NULL;
        } else {
@@ -4308,7 +4307,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 
 static int leaks_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+       struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+                                              root_caches_node);
        struct page *page;
        struct kmem_cache_node *n;
        const char *name;
index 2b8d9c3fbb47fd7a5c2a711dad73c5889dfe0bb2..cf63b5f01adf7da9d1def93f8763b50243adc698 100644 (file)
@@ -2023,7 +2023,6 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * If the boolean frontswap is true, only unuse pages_to_unuse pages;
  * pages_to_unuse==0 means all pages; ignored if frontswap is false
  */
-#define SWAP_UNUSE_MAX_TRIES 3
 int try_to_unuse(unsigned int type, bool frontswap,
                 unsigned long pages_to_unuse)
 {
@@ -2035,7 +2034,6 @@ int try_to_unuse(unsigned int type, bool frontswap,
        struct page *page;
        swp_entry_t entry;
        unsigned int i;
-       int retries = 0;
 
        if (!si->inuse_pages)
                return 0;
@@ -2053,11 +2051,9 @@ retry:
 
        spin_lock(&mmlist_lock);
        p = &init_mm.mmlist;
-       while ((p = p->next) != &init_mm.mmlist) {
-               if (signal_pending(current)) {
-                       retval = -EINTR;
-                       break;
-               }
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (p = p->next) != &init_mm.mmlist) {
 
                mm = list_entry(p, struct mm_struct, mmlist);
                if (!mmget_not_zero(mm))
@@ -2084,7 +2080,9 @@ retry:
        mmput(prev_mm);
 
        i = 0;
-       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
                entry = swp_entry(type, i);
                page = find_get_page(swap_address_space(entry), i);
@@ -2117,14 +2115,18 @@ retry:
         * If yes, we would need to do retry the unuse logic again.
         * Under global memory pressure, swap entries can be reinserted back
         * into process space after the mmlist loop above passes over them.
-        * Its not worth continuosuly retrying to unuse the swap in this case.
-        * So we try SWAP_UNUSE_MAX_TRIES times.
+        *
+        * Limit the number of retries? No: when mmget_not_zero() above fails,
+        * that mm is likely to be freeing swap from exit_mmap(), which proceeds
+        * at its own independent pace; and even shmem_writepage() could have
+        * been preempted after get_swap_page(), temporarily hiding that swap.
+        * It's easy and robust (though cpu-intensive) just to keep retrying.
         */
-       if (++retries >= SWAP_UNUSE_MAX_TRIES)
-               retval = -EBUSY;
-       else if (si->inuse_pages)
-               goto retry;
-
+       if (si->inuse_pages) {
+               if (!signal_pending(current))
+                       goto retry;
+               retval = -EINTR;
+       }
 out:
        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }
index d559bde497a9b9690f328f6f5deea3eb17de474b..43a2984bccaab6525afb6e7c61ce8d903f45f24e 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user);
  * @s: The string to duplicate
  * @n: Maximum number of bytes to copy, including the trailing NUL.
  *
- * Return: newly allocated copy of @s or %NULL in case of error
+ * Return: newly allocated copy of @s or an ERR_PTR() in case of error
  */
 char *strndup_user(const char __user *s, long n)
 {
index a5ad0b35ab8e3e6bea056baf2e5d8729f1003326..a815f73ee4d5b2d1a9872cca19db055845499aa1 100644 (file)
@@ -2176,7 +2176,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-                                struct mem_cgroup *memcg,
                                 struct scan_control *sc, bool actual_reclaim)
 {
        enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
@@ -2197,16 +2196,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
        inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
        active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
-       if (memcg)
-               refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-       else
-               refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
        /*
         * When refaults are being observed, it means a new workingset
         * is being established. Disable active list protection to get
         * rid of the stale workingset quickly.
         */
+       refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
        if (file && actual_reclaim && lruvec->refaults != refaults) {
                inactive_ratio = 0;
        } else {
@@ -2227,12 +2222,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-                                struct lruvec *lruvec, struct mem_cgroup *memcg,
-                                struct scan_control *sc)
+                                struct lruvec *lruvec, struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, is_file_lru(lru),
-                                        memcg, sc, true))
+               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
@@ -2332,7 +2325,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                         * anonymous pages on the LRU in eligible zones.
                         * Otherwise, the small LRU gets thrashed.
                         */
-                       if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
+                       if (!inactive_list_is_low(lruvec, false, sc, false) &&
                            lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
                                        >> sc->priority) {
                                scan_balance = SCAN_ANON;
@@ -2350,7 +2343,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * lruvec even if it has plenty of old anonymous pages unless the
         * system is under heavy pressure.
         */
-       if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+       if (!inactive_list_is_low(lruvec, true, sc, false) &&
            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
@@ -2503,7 +2496,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                                nr[lru] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(lru, nr_to_scan,
-                                                           lruvec, memcg, sc);
+                                                           lruvec, sc);
                        }
                }
 
@@ -2570,7 +2563,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+       if (inactive_list_is_low(lruvec, false, sc, true))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 }
@@ -2969,12 +2962,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
                unsigned long refaults;
                struct lruvec *lruvec;
 
-               if (memcg)
-                       refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-               else
-                       refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
                lruvec = mem_cgroup_lruvec(pgdat, memcg);
+               refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
                lruvec->refaults = refaults;
        } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
 }
@@ -3339,7 +3328,7 @@ static void age_active_anon(struct pglist_data *pgdat,
        do {
                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-               if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+               if (inactive_list_is_low(lruvec, false, sc, true))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);
 
index 36b56f858f0f0a8eb8db003d2b2a74a21a4c37f1..a7d493366a65b31b547ef25d34b94f2417895372 100644 (file)
@@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = {
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
-#else
-       "", /* nr_tlb_remote_flush */
-       "", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
index 15293c2a5dd821d39233c177d540e4db7407b110..8d77b6ee4477df71bc466c057338ea4ae62dcd88 100644 (file)
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
        return rc;
 }
 
-static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = -EINVAL;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
 
-       if (ops->ndo_fcoe_get_wwn)
-               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+#endif
 
-static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
-                                   struct scatterlist *sgl, unsigned int sgc)
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = 0;
-
-       if (ops->ndo_fcoe_ddp_target)
-               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+       int rc = -EINVAL;
 
+       if (ops->ndo_fcoe_get_wwn)
+               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
 #endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef NETDEV_FCOE_WWNN
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = vlan_dev_poll_controller,
        .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
index d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b..ad4f829193f053c8a0c0846f1e9f619617dcd18e 100644 (file)
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-       if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+       if (arg < 0 || arg >= MAX_LEC_ITF)
+               return -EINVAL;
+       arg = array_index_nospec(arg, MAX_LEC_ITF);
+       if (!dev_lec[arg])
                return -EINVAL;
        vcc->proto_data = dev_lec[arg];
        return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
                i = arg;
        if (arg >= MAX_LEC_ITF)
                return -EINVAL;
+       i = array_index_nospec(arg, MAX_LEC_ITF);
        if (!dev_lec[i]) {
                int size;
 
index a9b7919c9de55396d35b152a86edbf814287d8dd..d5df0114f08ac5331dcd46699551556ba60a4b80 100644 (file)
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
                ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
-               /* free the TID stats immediately */
-               cfg80211_sinfo_release_content(&sinfo);
+               if (!ret) {
+                       /* free the TID stats immediately */
+                       cfg80211_sinfo_release_content(&sinfo);
+               }
 
                dev_put(real_netdev);
                if (ret == -ENOENT) {
index ef39aabdb69435f384ae2ebef26d4d31367f427c..4fb01108e5f534b8a055edface0717f950ccdacc 100644 (file)
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
 {
        struct batadv_bla_claim search_claim, *claim;
+       struct batadv_bla_claim *claim_removed_entry;
+       struct hlist_node *claim_removed_node;
 
        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
                   mac, batadv_print_vid(vid));
 
-       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-                          batadv_choose_claim, claim);
-       batadv_claim_put(claim); /* reference from the hash is gone */
+       claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+                                               batadv_compare_claim,
+                                               batadv_choose_claim, claim);
+       if (!claim_removed_node)
+               goto free_claim;
 
+       /* reference from the hash is gone */
+       claim_removed_entry = hlist_entry(claim_removed_node,
+                                         struct batadv_bla_claim, hash_entry);
+       batadv_claim_put(claim_removed_entry);
+
+free_claim:
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
index 0b4b3fb778a61708978438bcaf7adfd05b997668..208655cf67179b5c2bbcfb8330993b42ca70109f 100644 (file)
@@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
                                                struct attribute *attr,
                                                char *buff, size_t count)
 {
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_priv *bat_priv;
        u32 tp_override;
        u32 old_tp_override;
        bool ret;
@@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 
        atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
 
-       batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       if (hard_iface->soft_iface) {
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       }
 
 out:
        batadv_hardif_put(hard_iface);
index f73d79139ae79834a3e429fab82b7d55e8a373d5..26c4e2493ddfbfdea26a9b45409821834010a913 100644 (file)
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  struct batadv_tt_global_entry *tt_global,
                                  const char *message)
 {
+       struct batadv_tt_global_entry *tt_removed_entry;
+       struct hlist_node *tt_removed_node;
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Deleting global tt entry %pM (vid: %d): %s\n",
                   tt_global->common.addr,
                   batadv_print_vid(tt_global->common.vid), message);
 
-       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_tt, &tt_global->common);
-       batadv_tt_global_entry_put(tt_global);
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_global->common);
+       if (!tt_removed_node)
+               return;
+
+       /* drop reference of remove hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_global_entry,
+                                      common.hash_entry);
+       batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
                           bool roaming)
 {
+       struct batadv_tt_local_entry *tt_removed_entry;
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       void *tt_entry_exists;
+       struct hlist_node *tt_removed_node;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
                                             batadv_compare_tt,
                                             batadv_choose_tt,
                                             &tt_local_entry->common);
-       if (!tt_entry_exists)
+       if (!tt_removed_node)
                goto out;
 
-       /* extra call to free the local tt entry */
-       batadv_tt_local_entry_put(tt_local_entry);
+       /* drop reference of remove hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_local_entry,
+                                      common.hash_entry);
+       batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
        if (tt_local_entry)
index 9a580999ca57e3037336bbcdb321dbb4ef0cb196..d892b7c3cc42a05e10053832d7bd4d969f019e46 100644 (file)
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        int err = 0;
 
-       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
        if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
index 5ea7e56119c13876a8726ffee2e9dc43ce73406f..ba303ee99b9b59762e724072d0f66907f46235b2 100644 (file)
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
        __br_handle_local_finish(skb);
 
-       BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-       br_pass_frame_up(skb);
-       return 0;
+       /* return 1 to signal the okfn() was called so it's ok to use the skb */
+       return 1;
 }
 
 /*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                                goto forward;
                }
 
-               /* Deliver packet to local host only */
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-                       NULL, skb, skb->dev, NULL, br_handle_local_finish);
-               return RX_HANDLER_CONSUMED;
+               /* The else clause should be hit when nf_hook():
+                *   - returns < 0 (drop/error)
+                *   - returns = 0 (stolen/nf_queue)
+                * Thus return 1 from the okfn() to signal the skb is ok to pass
+                */
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                           dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+                           br_handle_local_finish) == 1) {
+                       return RX_HANDLER_PASS;
+               } else {
+                       return RX_HANDLER_CONSUMED;
+               }
        }
 
 forward:
index a0e369179f6d1316ec261521b857a7687272d3b9..45e7f4173bbafe7e59e2ea514a7bccbee8456c79 100644 (file)
@@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        if (ipv4_is_local_multicast(group))
                return 0;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
@@ -2028,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
        __br_multicast_open(br, query);
 
-       list_for_each_entry(port, &br->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &br->port_list, list) {
                if (port->state == BR_STATE_DISABLED ||
                    port->state == BR_STATE_BLOCKING)
                        continue;
@@ -2040,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
                        br_multicast_enable(&port->ip6_own_query);
 #endif
        }
+       rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
index 9c07591b0232e6bbdecf7efece7c9143842671b1..7104cf13da840d21cca1a63d3c1551ff9bbd9076 100644 (file)
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
                       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-                      br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+                      br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
                return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
index b2651bb6d2a31dde065000c59bbbf3dfdadd976e..e657289db4ac44b023ce40ca9959185a6a33cb22 100644 (file)
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (!skb_queue_empty(&sk->sk_receive_queue));
+       } while (sk->sk_receive_queue.prev != *last);
 
        error = -EAGAIN;
 
index 2b67f2aa59ddb64d27378bed44f9d262093219e0..f409406254ddf2e204676bb8bdfb95d0cb3a0e71 100644 (file)
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
        BUG_ON(!dev_net(dev));
 
        net = dev_net(dev);
-       if (dev->flags & IFF_UP)
+
+       /* Some auto-enslaved devices e.g. failover slaves are
+        * special, as userspace might rename the device after
+        * the interface had been brought up and running since
+        * the point kernel initiated auto-enslavement. Allow
+        * live name change even when these slave devices are
+        * up and running.
+        *
+        * Typically, users of these auto-enslaving devices
+        * don't actually care about slave name change, as
+        * they are supposed to operate on master interface
+        * directly.
+        */
+       if (dev->flags & IFF_UP &&
+           likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;
 
        write_seqcount_begin(&devnet_rename_seq);
@@ -5014,8 +5028,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
        if (pt_prev->list_func != NULL)
                pt_prev->list_func(head, pt_prev, orig_dev);
        else
-               list_for_each_entry_safe(skb, next, head, list)
+               list_for_each_entry_safe(skb, next, head, list) {
+                       skb_list_del_init(skb);
                        pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               }
 }
 
 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
index b1eb324197321dd390596edd2aec6c343ee14654..36ed619faf3641ae4d40edc7385b003de7500764 100644 (file)
@@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        WARN_ON_ONCE(!ret);
 
        gstrings.len = ret;
-       data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
-       if (gstrings.len && !data)
-               return -ENOMEM;
 
-       __ethtool_get_strings(dev, gstrings.string_set, data);
+       if (gstrings.len) {
+               data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+               if (!data)
+                       return -ENOMEM;
+
+               __ethtool_get_strings(dev, gstrings.string_set, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       ops->get_ethtool_stats(dev, &stats, data);
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+               ops->get_ethtool_stats(dev, &stats, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       if (dev->phydev && !ops->get_ethtool_phy_stats) {
-               ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
-               if (ret < 0)
-                       return ret;
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+
+               if (dev->phydev && !ops->get_ethtool_phy_stats) {
+                       ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       ops->get_ethtool_phy_stats(dev, &stats, data);
+               }
        } else {
-               ops->get_ethtool_phy_stats(dev, &stats, data);
+               data = NULL;
        }
 
        ret = -EFAULT;
index 4a92a98ccce9a0570cdc75c66180db2f7305073f..b5cd3c727285d7a1738118c246abce8d31dac08f 100644 (file)
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
                goto err_upper_link;
        }
 
-       slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_register &&
            !fops->slave_register(slave_dev, failover_dev))
                return NOTIFY_OK;
 
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
        netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
        netdev_rx_handler_unregister(slave_dev);
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_unregister &&
            !fops->slave_unregister(slave_dev, failover_dev))
index 647c63a7b25b6745e75a812b65a4052f3c72b690..27e61ffd903931c45f0a3f2f6e436937058dfb39 100644 (file)
@@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
         * Only binding to IP is supported.
         */
        err = -EINVAL;
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return err;
        if (addr->sa_family == AF_INET) {
                if (addr_len < sizeof(struct sockaddr_in))
                        return err;
@@ -6613,14 +6615,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
                                           const struct bpf_prog *prog,
                                           struct bpf_insn_access_aux *info)
 {
-       if (type == BPF_WRITE) {
-               switch (off) {
-               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
-                       break;
-               default:
-                       return false;
-               }
-       }
+       if (type == BPF_WRITE)
+               return false;
 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, data):
@@ -6632,11 +6628,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
-       case bpf_ctx_range(struct __sk_buff, tc_classid):
-       case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range_till(struct __sk_buff, family, local_port):
-       case bpf_ctx_range(struct __sk_buff, tstamp):
-       case bpf_ctx_range(struct __sk_buff, wire_len):
+       default:
                return false;
        }
 
index bb1a54747d64811a5545a78243f8cf021a4adf46..94a450b2191a9e25ca79534f9caa68a442daa80c 100644 (file)
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Pass parameters to the BPF program */
        memset(flow_keys, 0, sizeof(*flow_keys));
        cb->qdisc_cb.flow_keys = flow_keys;
+       flow_keys->n_proto = skb->protocol;
        flow_keys->nhoff = skb_network_offset(skb);
        flow_keys->thoff = flow_keys->nhoff;
 
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Restore state */
        memcpy(cb, &cb_saved, sizeof(cb_saved));
 
-       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+                                  skb_network_offset(skb), skb->len);
        flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
                                   flow_keys->nhoff, skb->len);
 
index f8f94303a1f57203eaa28b5ea459ac28c89e1b12..8f8b7b6c2945a75406c15e5faac61759a02db717 100644 (file)
@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev)
 
        error = device_add(dev);
        if (error)
-               goto error_put_device;
+               return error;
 
        error = register_queue_kobjects(ndev);
-       if (error)
-               goto error_device_del;
+       if (error) {
+               device_del(dev);
+               return error;
+       }
 
        pm_runtime_set_memalloc_noio(dev, true);
 
-       return 0;
-
-error_device_del:
-       device_del(dev);
-error_put_device:
-       put_device(dev);
        return error;
 }
 
index 17f36317363d19dcdeb6a6e75a116220b078c2b0..7e6dcc6257011d8b60e132e97a0db229c39d1daf 100644 (file)
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 
        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
+       get_random_bytes(&net->hash_mix, sizeof(u32));
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
index 703cf76aa7c2dee7c5b556f5f035c015780f55f0..7109c168b5e0fb20b8b6ad8951893b181803fad8 100644 (file)
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
        };
-       struct sock_fprog_kern ptp_prog = {
-               .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-       };
+       struct sock_fprog_kern ptp_prog;
+
+       ptp_prog.len = ARRAY_SIZE(ptp_filter);
+       ptp_prog.filter = ptp_filter;
 
        BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
index a51cab95ba64c7d76a2ba0940c67e9f6e53f54e1..220c56e936592495656962050d285bb1c0024b37 100644 (file)
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
        struct if_stats_msg *ifsm;
 
-       if (nlh->nlmsg_len < sizeof(*ifsm)) {
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
                NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
                return -EINVAL;
        }
index 2415d9cb9b89fefb30a7932a70c3497aeb67c80e..40796b8bf820450f5d0cce38986bd29137e2fd05 100644 (file)
@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        unsigned int delta_truesize;
        struct sk_buff *lp;
 
-       if (unlikely(p->len + len >= 65536))
+       if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;
 
        lp = NAPI_GRO_CB(p)->last;
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-       int mac_len;
+       int mac_len, meta_len;
+       void *meta;
 
        if (skb_cow(skb, skb_headroom(skb)) < 0) {
                kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
                        mac_len - VLAN_HLEN - ETH_TLEN);
        }
+
+       meta_len = skb_metadata_len(skb);
+       if (meta_len) {
+               meta = skb_metadata_end(skb) - meta_len;
+               memmove(meta + VLAN_HLEN, meta, meta_len);
+       }
+
        skb->mac_header += VLAN_HLEN;
        return skb;
 }
index 782343bb925b643348cc906a70b97caa0388178d..067878a1e4c51363e065e13ccdb2b9d03c6a9c5f 100644 (file)
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
                tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
        }
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
                *(struct old_timeval32 *)optval = tv32;
                return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
        struct __kernel_sock_timeval tv;
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32;
 
                if (optlen < sizeof(tv32))
index f227f002c73d382fecd98c8857ce4c9139cb7a8a..db87d9f5801983913e66549e5d5911ead10f3ac1 100644 (file)
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
        if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
                return -ENOMEM;
 
-       return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+       if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+               kfree(fval.sp.vec);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /**
index ed4f6dc26365baa3e9988b2f11ac26d8ffeb55b7..85c22ada47449d580ee2a175c0729f4bad2cad61 100644 (file)
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        return skb;
 }
 
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+                                int *offset)
+{
+       *offset = QCA_HDR_LEN;
+       *proto = ((__be16 *)skb->data)[0];
+
+       return 0;
+}
+
 const struct dsa_device_ops qca_netdev_ops = {
        .xmit   = qca_tag_xmit,
        .rcv    = qca_tag_rcv,
+       .flow_dissect = qca_tag_flow_dissect,
        .overhead = QCA_HDR_LEN,
 };
index 79e98e21cdd7f971694356065afb3f68fb34c1a0..12ce6c526d72bd15a16a9415ed992c25039d415a 100644 (file)
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;
+       u8 proto_ctype;
 
        if (!fou)
                return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);
 
+       proto_ctype = guehdr->proto_ctype;
        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
        if (iptunnel_pull_offloads(skb))
                goto drop;
 
-       return -guehdr->proto_ctype;
+       return -proto_ctype;
 
 drop:
        kfree_skb(skb);
index fd219f7bd3ea2c9263ac6d21ed3a66fd6442496c..4b052644147630fbfa8075ee623714ff5013bf94 100644 (file)
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        struct net *net = dev_net(skb->dev);
        struct metadata_dst *tun_dst = NULL;
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        struct ip_tunnel_net *itn;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb,
                                           len,
                                           htons(ETH_P_TEB),
@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        goto drop;
 
                if (tunnel->collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
                        md->version = ver;
                        md2 = &md->u.md2;
index ecce2dc78f17eb48d91f8f6638ef0a4e8076fedf..1132d6d1796a4f7c947da76b9b39e7fbe11d3399 100644 (file)
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
                       ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt;
        const struct iphdr *iph;
-       struct net_device *dev = skb->dev;
 
        /* It looks as overkill, because not all
           IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
                        }
                }
 
-               if (ip_options_rcv_srr(skb))
+               if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }
 
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
        }
 #endif
 
-       if (iph->ihl > 5 && ip_rcv_options(skb))
+       if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;
 
        rt = skb_rtable(skb);
index 32a35043c9f590314b7fa354d5e948b59e665214..3db31bb9df50622f8c9ae961f4eabc566d1cb74a 100644 (file)
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
        }
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
        int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 
                orefdst = skb->_skb_refdst;
                skb_dst_set(skb, NULL);
-               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
                rt2 = skb_rtable(skb);
                if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
                        skb_dst_drop(skb);
index a5da63e5faa2d8118d3044a5a79b5e51bf61cafc..88ce038dd495dec1d34867eb40091c61141e9acb 100644 (file)
@@ -1185,9 +1185,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
+       struct ip_options opt;
        struct rtable *rt;
+       int res;
 
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+       /* Recompile ip options since IPCB may not be valid anymore.
+        */
+       memset(&opt, 0, sizeof(opt));
+       opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+
+       rcu_read_lock();
+       res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+       rcu_read_unlock();
+
+       if (res)
+               return;
+
+       __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
 
        rt = skb_rtable(skb);
        if (rt)
index cd4814f7e96223447195f0d0ac224c54d5501d2e..477cb4aa456c11c70185a982cbadafba857d3619 100644 (file)
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA        1024U
 
 struct dctcp {
-       u32 acked_bytes_ecn;
-       u32 acked_bytes_total;
-       u32 prior_snd_una;
+       u32 old_delivered;
+       u32 old_delivered_ce;
        u32 prior_rcv_nxt;
        u32 dctcp_alpha;
        u32 next_seq;
@@ -67,19 +66,14 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-                "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
        ca->next_seq = tp->snd_nxt;
 
-       ca->acked_bytes_ecn = 0;
-       ca->acked_bytes_total = 0;
+       ca->old_delivered = tp->delivered;
+       ca->old_delivered_ce = tp->delivered_ce;
 }
 
 static void dctcp_init(struct sock *sk)
@@ -91,7 +85,6 @@ static void dctcp_init(struct sock *sk)
             sk->sk_state == TCP_CLOSE)) {
                struct dctcp *ca = inet_csk_ca(sk);
 
-               ca->prior_snd_una = tp->snd_una;
                ca->prior_rcv_nxt = tp->rcv_nxt;
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -123,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct dctcp *ca = inet_csk_ca(sk);
-       u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-       /* If ack did not advance snd_una, count dupack as MSS size.
-        * If ack did update window, do not count it at all.
-        */
-       if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-               acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-       if (acked_bytes) {
-               ca->acked_bytes_total += acked_bytes;
-               ca->prior_snd_una = tp->snd_una;
-
-               if (flags & CA_ACK_ECE)
-                       ca->acked_bytes_ecn += acked_bytes;
-       }
 
        /* Expired RTT */
        if (!before(tp->snd_una, ca->next_seq)) {
-               u64 bytes_ecn = ca->acked_bytes_ecn;
+               u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
                u32 alpha = ca->dctcp_alpha;
 
                /* alpha = (1 - g) * alpha + g * F */
 
                alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-               if (bytes_ecn) {
+               if (delivered_ce) {
+                       u32 delivered = tp->delivered - ca->old_delivered;
+
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
-                        * after 8 Mbytes.
+                        * after 8 M packets.
                         */
-                       bytes_ecn <<= (10 - dctcp_shift_g);
-                       do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+                       delivered_ce <<= (10 - dctcp_shift_g);
+                       delivered_ce /= max(1U, delivered);
 
-                       alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+                       alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
                }
                /* dctcp_alpha can be read from dctcp_get_info() without
                 * synchro, so we ask compiler to not use dctcp_alpha
@@ -164,21 +145,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
        }
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-       if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-               struct dctcp *ca = inet_csk_ca(sk);
+       struct dctcp *ca = inet_csk_ca(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-               /* If this extension is enabled, we clamp dctcp_alpha to
-                * max on packet loss; the motivation is that dctcp_alpha
-                * is an indicator to the extend of congestion and packet
-                * loss is an indicator of extreme congestion; setting
-                * this in practice turned out to be beneficial, and
-                * effectively assumes total congestion which reduces the
-                * window by half.
-                */
-               ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-       }
+       ca->loss_cwnd = tp->snd_cwnd;
+       tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+       if (new_state == TCP_CA_Recovery &&
+           new_state != inet_csk(sk)->icsk_ca_state)
+               dctcp_react_to_loss(sk);
+       /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+        * one loss-adjustment per RTT.
+        */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +173,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
                break;
+       case CA_EVENT_LOSS:
+               dctcp_react_to_loss(sk);
+               break;
        default:
                /* Don't care for the rest. */
                break;
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                             union tcp_cc_info *info)
 {
        const struct dctcp *ca = inet_csk_ca(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
 
        /* Fill it also in case of VEGASINFO due to req struct limits.
         * We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                        info->dctcp.dctcp_enabled = 1;
                        info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
                        info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-                       info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-                       info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+                       info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+                                                  (tp->delivered_ce - ca->old_delivered_ce);
+                       info->dctcp.dctcp_ab_tot = tp->mss_cache *
+                                                  (tp->delivered - ca->old_delivered);
                }
 
                *attr = INET_DIAG_DCTCPINFO;
index 5def3c48870e17f42ac9424a6ee091ac4824dabc..731d3045b50a0fb9a89c887a154db9a3da8c7ddd 100644 (file)
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       int room;
+
+       room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
        /* Check #1 */
-       if (tp->rcv_ssthresh < tp->window_clamp &&
-           (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_under_memory_pressure(sk)) {
+       if (room > 0 && !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
                if (incr) {
                        incr = max_t(int, incr, 2 * skb->len);
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-                                              tp->window_clamp);
+                       tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
index 277d71239d755d858be70663320d8de2ab23dfcc..2f8039a26b08fa2b13b5e4da642c0f4ff8207571 100644 (file)
@@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
 {
        int cpu;
 
-       module_put(net->ipv4.tcp_congestion_control->owner);
+       if (net->ipv4.tcp_congestion_control)
+               module_put(net->ipv4.tcp_congestion_control->owner);
 
        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
index 79d2e43c05c5e792b1498c4bf5f73756252e2c7d..5fc1f4e0c0cf0d3dd403c2dcaf291ea9c096d235 100644 (file)
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
 
 done:
        rhashtable_walk_stop(&iter);
+       rhashtable_walk_exit(&iter);
        return ret;
 }
 
index b32c95f0212809006455cb79768f96bd1c516994..655e46b227f9eb99e43369ffb96a411bd662eadb 100644 (file)
@@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 }
 
 static int ip6erspan_rcv(struct sk_buff *skb,
-                        struct tnl_ptk_info *tpi)
+                        struct tnl_ptk_info *tpi,
+                        int gre_hdr_len)
 {
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        const struct ipv6hdr *ipv6h;
        struct erspan_md2 *md2;
        struct ip6_tnl *tunnel;
@@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)skb->data;
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb, len,
                                           htons(ETH_P_TEB),
                                           false, false) < 0)
                        return PACKET_REJECT;
 
                if (tunnel->parms.collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct metadata_dst *tun_dst;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
                        md->version = ver;
@@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb)
 
        if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
-               if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
+               if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
                goto out;
        }
index edbd12067170bc77332d57a04c96812d9702520b..e51f3c648b094afe1d60a518db36a42444c4c55d 100644 (file)
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
-       unsigned int mtu, hlen, left, len;
+       unsigned int mtu, hlen, left, len, nexthdr_offset;
        int hroom, troom;
        __be32 frag_id;
        int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                goto fail;
        hlen = err;
        nexthdr = *prevhdr;
+       nexthdr_offset = prevhdr - skb_network_header(skb);
 
        mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            (err = skb_checksum_help(skb)))
                goto fail;
 
+       prevhdr = skb_network_header(skb) + nexthdr_offset;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        if (skb_has_frag_list(skb)) {
                unsigned int first_len = skb_pagelen(skb);
index 0c6403cf8b5226fbe4bf2e4506b3816b30973b0b..ade1390c63488a60b405ca70052b3493fecc67d5 100644 (file)
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
                                           eiph->daddr, eiph->saddr, 0, 0,
                                           IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
                        if (!IS_ERR(rt))
                                ip_rt_put(rt);
                        goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        } else {
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
                                   skb2->dev) ||
-                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
                        goto out;
        }
 
index 0302e0eb07af1d270a615bcadfcb9bc08ca61d6c..7178e32eb15d0a969eb39fcfec9973bb0150bf48 100644 (file)
@@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
                rcu_read_lock();
                from = rcu_dereference(rt6->from);
+               if (!from) {
+                       rcu_read_unlock();
+                       return;
+               }
                nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
index 07e21a82ce4cc2e41af8e38961f9917d357fd20b..b2109b74857d053f52b06c42698cc393d5838609 100644 (file)
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                    !net_eq(tunnel->net, dev_net(tunnel->dev))))
                        goto out;
 
+               /* skb can be uncloned in iptunnel_pull_header, so
+                * old iph is no longer valid
+                */
+               iph = (const struct iphdr *)skb_mac_header(skb);
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
index b444483cdb2b42ef7acdbd7d23a0c046f55077c2..622eeaf5732b39b97752eefb864133e46b27a15d 100644 (file)
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                             int addr_len)
 {
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        /* The following checks are replicated from __ip6_datagram_connect()
         * and intended to prevent BPF program called below from accessing
         * bytes that are out of the bound specified by user in addr_len.
index c5c5ab6c5a1ccdf55eb7891e8c21ea3cdf7d2a28..44fdc641710dbdb64cedb3306ab822573cb97477 100644 (file)
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
        if (err)
                goto fail;
 
-       err = sock_register(&kcm_family_ops);
-       if (err)
-               goto sock_register_fail;
-
        err = register_pernet_device(&kcm_net_ops);
        if (err)
                goto net_ops_fail;
 
+       err = sock_register(&kcm_family_ops);
+       if (err)
+               goto sock_register_fail;
+
        err = kcm_proc_init();
        if (err)
                goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
        return 0;
 
 proc_init_fail:
-       unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
        sock_unregister(PF_KCM);
 
 sock_register_fail:
+       unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
        proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ fail:
 static void __exit kcm_exit(void)
 {
        kcm_proc_exit();
-       unregister_pernet_device(&kcm_net_ops);
        sock_unregister(PF_KCM);
+       unregister_pernet_device(&kcm_net_ops);
        proto_unregister(&kcm_proto);
        destroy_workqueue(kcm_wq);
 
index b99e73a7e7e0f2b4959b279e3aecbadf29667d55..2017b7d780f5af73c1ac7461113842776d1b00fc 100644 (file)
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        struct llc_sap *sap;
        int rc = -EINVAL;
 
-       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
        lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
        if (unlikely(addr->sllc_family != AF_LLC))
                goto out;
+       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
        rcu_read_lock();
        if (sk->sk_bound_dev_if) {
index 28d022a3eee305bc9d04531eb6b70d3b57412d93..ae4f0be3b393ba727b95060bb7148ec0cd961440 100644 (file)
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+       if (local->in_reconfig)
+               return;
+
        if (!check_sdata_in_driver(sdata))
                return;
 
index 4700718e010f5a886001e9a0a0326a628edf0739..37e372896230a08c6a9214f88ce54e7ad823d352 100644 (file)
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                 * The driver doesn't know anything about VLAN interfaces.
                 * Hence, don't send GTKs for VLAN interfaces to the driver.
                 */
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+                       ret = 1;
                        goto out_unsupported;
+               }
        }
 
        ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                /* all of these we can do in software - if driver can */
                if (ret == 1)
                        return 0;
-               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-                       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                               return 0;
+               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
                        return -EINVAL;
-               }
                return 0;
        default:
                return -EINVAL;
index 95eb5064fa9166220bf67af98dedf83726ffcdc8..b76a2aefa9ec05e5162ab565a108b5b98848116f 100644 (file)
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
        /* Use last four bytes of hw addr as hash index */
-       return jhash_1word(*(u32 *)(addr+2), seed);
+       return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
index 7f8d93401ce070f9e2e61ce6a84e5ab8768b5811..bf0b187f994e9c56e191d2045f405cb6e6bac336 100644 (file)
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
                return;
 
        for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-               if (txq_has_queue(sta->sta.txq[tid]))
+               struct ieee80211_txq *txq = sta->sta.txq[tid];
+               struct txq_info *txqi = to_txq_info(txq);
+
+               spin_lock(&local->active_txq_lock[txq->ac]);
+               if (!list_empty(&txqi->schedule_order))
+                       list_del_init(&txqi->schedule_order);
+               spin_unlock(&local->active_txq_lock[txq->ac]);
+
+               if (txq_has_queue(txq))
                        set_bit(tid, &sta->txq_buffered_tids);
                else
                        clear_bit(tid, &sta->txq_buffered_tids);
index 366b9e6f043e2df89eccb4d63a9fb3ab1d7db023..40141df09f255fac46043f67656e98e16adda5b9 100644 (file)
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN    100
+#define MAX_MSG_LEN    120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
        TP_PROTO(struct va_format *vaf),
index 8a49a74c0a374815ca2f374510216b334eb00013..2e816dd67be72d161bf1959554d293f2f6725673 100644 (file)
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        u8 max_subframes = sta->sta.max_amsdu_subframes;
        int max_frags = local->hw.max_tx_fragments;
        int max_amsdu_len = sta->sta.max_amsdu_len;
+       int orig_truesize;
        __be16 len;
        void *data;
        bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!head || skb_is_gso(head))
                goto out;
 
+       orig_truesize = head->truesize;
        orig_len = head->len;
 
        if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        *frag_tail = skb;
 
 out_recalc:
+       fq->memory_usage += head->truesize - orig_truesize;
        if (head->len != orig_len) {
                flow->backlog += head->len - orig_len;
                tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct ieee80211_txq *ret = NULL;
        struct txq_info *txqi = NULL;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
  begin:
        txqi = list_first_entry_or_null(&local->active_txqs[ac],
                                        struct txq_info,
                                        schedule_order);
        if (!txqi)
-               return NULL;
+               goto out;
 
        if (txqi->txq.sta) {
                struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 
 
        if (txqi->schedule_round == local->schedule_round[ac])
-               return NULL;
+               goto out;
 
        list_del_init(&txqi->schedule_order);
        txqi->schedule_round = local->schedule_round[ac];
-       return &txqi->txq;
+       ret = &txqi->txq;
+
+out:
+       spin_unlock_bh(&local->active_txq_lock[ac]);
+       return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-                         struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool force)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct txq_info *txqi = to_txq_info(txq);
 
-       lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+       spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
        if (list_empty(&txqi->schedule_order) &&
-           (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+           (force || !skb_queue_empty(&txqi->frags) ||
+            txqi->tin.backlog_packets)) {
                /* If airtime accounting is active, always enqueue STAs at the
                 * head of the list to ensure that they only get moved to the
                 * back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
                        list_add_tail(&txqi->schedule_order,
                                      &local->active_txqs[txq->ac]);
        }
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
 
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                           struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       spin_lock_bh(&local->active_txq_lock[txq->ac]);
-       ieee80211_return_txq(hw, txq);
        spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
 
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
                                struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
        struct sta_info *sta;
        u8 ac = txq->ac;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
        if (!txqi->txq.sta)
                goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
        sta->airtime[ac].deficit += sta->airtime_weight;
        list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return false;
 out:
        if (!list_empty(&txqi->schedule_order))
                list_del_init(&txqi->schedule_order);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
        spin_lock_bh(&local->active_txq_lock[ac]);
        local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
        spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
index f28e937320a3b453371143a035e6967482d17cd4..216ab915dd54d4ad7f205aac9f0ab3e3291a2684 100644 (file)
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err = 0;
-       unsigned long groups = nladdr->nl_groups;
+       unsigned long groups;
        bool bound;
 
        if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;
+       groups = nladdr->nl_groups;
 
        /* Only superuser is allowed to listen multicasts */
        if (groups) {
index 1d3144d1990352f4eb8942220e03e225e01af19f..71ffd1a6dc7c6063c00f4c82f985fe9fc0d80dc0 100644 (file)
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
        int i;
        int rc = proto_register(&nr_proto, 0);
 
-       if (rc != 0)
-               goto out;
+       if (rc)
+               return rc;
 
        if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-               return -1;
+               pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+                      __func__);
+               rc = -EINVAL;
+               goto unregister_proto;
        }
 
        dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-       if (dev_nr == NULL) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-               return -1;
+       if (!dev_nr) {
+               pr_err("NET/ROM: %s - unable to allocate device array\n",
+                      __func__);
+               rc = -ENOMEM;
+               goto unregister_proto;
        }
 
        for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
                sprintf(name, "nr%d", i);
                dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+                       rc = -ENOMEM;
                        goto fail;
                }
 
                dev->base_addr = i;
-               if (register_netdev(dev)) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+               rc = register_netdev(dev);
+               if (rc) {
                        free_netdev(dev);
                        goto fail;
                }
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
                dev_nr[i] = dev;
        }
 
-       if (sock_register(&nr_family_ops)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+       rc = sock_register(&nr_family_ops);
+       if (rc)
                goto fail;
-       }
 
-       register_netdevice_notifier(&nr_dev_notifier);
+       rc = register_netdevice_notifier(&nr_dev_notifier);
+       if (rc)
+               goto out_sock;
 
        ax25_register_pid(&nr_pid);
        ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-       nr_register_sysctl();
+       rc = nr_register_sysctl();
+       if (rc)
+               goto out_sysctl;
 #endif
 
        nr_loopback_init();
 
-       proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-       proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-       proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-       return rc;
+       rc = -ENOMEM;
+       if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+               goto proc_remove1;
+       if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+                            &nr_neigh_seqops))
+               goto proc_remove2;
+       if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+                            &nr_node_seqops))
+               goto proc_remove3;
+
+       return 0;
+
+proc_remove3:
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+       remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+       nr_loopback_clear();
+       nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+       nr_unregister_sysctl();
+out_sysctl:
+#endif
+       ax25_linkfail_release(&nr_linkfail_notifier);
+       ax25_protocol_release(AX25_P_NETROM);
+       unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+       sock_unregister(PF_NETROM);
 fail:
        while (--i >= 0) {
                unregister_netdev(dev_nr[i]);
                free_netdev(dev_nr[i]);
        }
        kfree(dev_nr);
+unregister_proto:
        proto_unregister(&nr_proto);
-       rc = -1;
-       goto out;
+       return rc;
 }
 
 module_init(nr_proto_init);
index 215ad22a96476ebb9d30919e99d67bda8e1ce88f..93d13f01998133a2b6c6b3256bb19679f14cea65 100644 (file)
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
        }
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
        del_timer_sync(&loopback_timer);
        skb_queue_purge(&loopback_queue);
index 6485f593e2f09bc3f215e2ad2c638154de738487..b76aa668a94bce6c6d1280d5cbf307d6ce94e013 100644 (file)
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *     Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
index ba1c368b3f186e140149a75e8d98dee24587a020..771011b84270e87854a8c47db1c0253640449fcc 100644 (file)
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
        { }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
        nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+       if (!nr_table_header)
+               return -ENOMEM;
+       return 0;
 }
 
 void nr_unregister_sysctl(void)
index ddfc52ac1f9b4391cb8b6e0f107658b1ee011565..c0d323b58e732318cc352be35bf940693b9bd028 100644 (file)
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                create_info = (struct nci_hci_create_pipe_resp *)skb->data;
                dest_gate = create_info->dest_gate;
                new_pipe = create_info->pipe;
+               if (new_pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                        goto exit;
                }
                delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+               if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                ndev->hci_dev->pipes[delete_info->pipe].gate =
                                                NCI_HCI_INVALID_GATE;
index 691da853bef5cb801d963cae4e9bf7b23a3dddd6..4bdf5e3ac2087a67e715ebd720159205697668a8 100644 (file)
@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
        struct sw_flow_actions *acts;
        int new_acts_size;
-       int req_size = NLA_ALIGN(attr_len);
+       size_t req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;
 
        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;
 
-       new_acts_size = ksize(*sfa) * 2;
+       new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
index d6cc97fbbbb02458d958a8f493e37e6249db4db6..2b969f99ef1311f845baea874a985714cb051c7c 100644 (file)
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;
 
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (uaddr->sa_family) {
index 17c9d9f0c8483b4b0a887e69e7caac246c369423..0f4398e7f2a7add7c20b6fdd333c40af4e719c92 100644 (file)
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        /* We allow an RDS socket to be bound to either IPv4 or IPv6
         * address.
         */
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
index fd2694174607405ab96f6f0dea10bc8dcc8caea9..faf726e00e27c75b11721dbc55518ca60bdf00a6 100644 (file)
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-               if (net != c_net || !tc->t_sock)
+               if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
index 96f2952bbdfd6e62ffcec87f0a565378abbfe4f5..ae8c5d7f3bf1e29460e5b96b05b7b1b1ecd4ce15 100644 (file)
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-       u16 service_id = srx->srx_service;
+       u16 service_id;
        int ret;
 
        _enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;
+       service_id = srx->srx_service;
 
        lock_sock(&rx->sk);
 
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
+ * @_life: Where to store the life value
  *
  * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server.  Passes back in *_life a number representing
+ * the life state which can be compared to that returned by a previous call and
+ * return true if the call is still alive.
  *
  * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(const struct socket *sock,
-                           const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+                            const struct rxrpc_call *call,
+                            u32 *_life)
 {
-       return call->acks_latest;
+       *_life = call->acks_latest;
+       return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
index 4b1a534d290a79e3f035ee60766b4f2ebb2e35c2..062ca9dc29b8ab2fa7381c606791d4fd39657962 100644 (file)
@@ -654,6 +654,7 @@ struct rxrpc_call {
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_skew;      /* skew on packet being ACK'd */
        rxrpc_serial_t          ackr_serial;    /* serial of packet being ACK'd */
+       rxrpc_serial_t          ackr_first_seq; /* first sequence number received */
        rxrpc_seq_t             ackr_prev_seq;  /* previous sequence number received */
        rxrpc_seq_t             ackr_consumed;  /* Highest packet shown consumed */
        rxrpc_seq_t             ackr_seen;      /* Highest packet shown seen */
index b6fca8ebb1173f4de1047e96315c26072666c2e9..8d31fb4c51e17c1934face0f4320dfd219525a66 100644 (file)
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl)
+                             enum rxrpc_call_completion compl,
+                             rxrpc_serial_t serial)
 {
        struct rxrpc_call *call;
        int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
                                                  call->call_id, 0,
                                                  conn->abort_code,
                                                  conn->error);
+                       else
+                               trace_rxrpc_rx_abort(call, serial,
+                                                    conn->abort_code);
                        if (rxrpc_set_call_completion(call, compl,
                                                      conn->abort_code,
                                                      conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        conn->state = RXRPC_CONN_LOCALLY_ABORTED;
        spin_unlock_bh(&conn->state_lock);
 
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
        msg.msg_name    = &conn->params.peer->srx.transport;
        msg.msg_namelen = conn->params.peer->srx.transport_len;
        msg.msg_control = NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        len = iov[0].iov_len + iov[1].iov_len;
 
        serial = atomic_inc_return(&conn->serial);
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
        whdr.serial = htonl(serial);
        _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                conn->error = -ECONNABORTED;
                conn->abort_code = abort_code;
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
index 9128aa0e40aac8f51a84f10dc0bd0dd5933c1e23..4c6f9d0a00e79e1874f6ff6ceb6632a42c5072ff 100644 (file)
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                u8 acks[RXRPC_MAXACKS];
        } buf;
        rxrpc_serial_t acked_serial;
-       rxrpc_seq_t first_soft_ack, hard_ack;
+       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
        int nr_acks, offset, ioffset;
 
        _enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        acked_serial = ntohl(buf.ack.serial);
        first_soft_ack = ntohl(buf.ack.firstPacket);
+       prev_pkt = ntohl(buf.ack.previousPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = buf.ack.nAcks;
        summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
                              buf.ack.reason : RXRPC_ACK__INVALID);
 
        trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
-                          first_soft_ack, ntohl(buf.ack.previousPacket),
+                          first_soft_ack, prev_pkt,
                           summary.ack_reason, nr_acks);
 
        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  rxrpc_propose_ack_respond_to_ack);
        }
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (outside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                return;
 
        buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        spin_lock(&call->input_lock);
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (inside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
        call->acks_latest = sp->hdr.serial;
 
+       call->ackr_first_seq = first_soft_ack;
+       call->ackr_prev_seq = prev_pkt;
+
        /* Parse rwind and mtu sizes if provided. */
        if (buf.info.rxMTU)
                rxrpc_input_ackinfo(call, skb, &buf.info);
index bc05af89fc381daa46d7cf8032c9900dfbcea65c..6e84d878053c7b8821483c0c1447a5c338d5fade 100644 (file)
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
 
        _enter("%p{%d}", sk, local->debug_id);
 
+       /* Clear the outstanding error value on the socket so that it doesn't
+        * cause kernel_sendmsg() to return it later.
+        */
+       sock_error(sk);
+
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
                _leave("UDP socket errqueue empty");
index 46c9312085b1ba81b4941607f751a07adb8f3c20..bec64deb7b0a2794345c896827846fa8bac57e19 100644 (file)
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
 }
 
 /*
- * Queue a DATA packet for transmission, set the resend timeout and send the
- * packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send
+ * the packet immediately.  Returns the error from rxrpc_send_data_packet()
+ * in case the caller wants to do something with it.
  */
-static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
-                              struct sk_buff *skb, bool last,
-                              rxrpc_notify_end_tx_t notify_end_tx)
+static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+                             struct sk_buff *skb, bool last,
+                             rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 
 out:
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
-       _leave("");
+       _leave(" = %d", ret);
+       return ret;
 }
 
 /*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (ret < 0)
                                goto out;
 
-                       rxrpc_queue_packet(rx, call, skb,
-                                          !msg_data_left(msg) && !more,
-                                          notify_end_tx);
+                       ret = rxrpc_queue_packet(rx, call, skb,
+                                                !msg_data_left(msg) && !more,
+                                                notify_end_tx);
+                       /* Should check for failure here */
                        skb = NULL;
                }
        } while (msg_data_left(msg) > 0);
index 4060b0955c97db68872a88d6bd05d5143fdc2e7c..0f82d50ea23245be1ce34fcce1cdb4a048c1af17 100644 (file)
@@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
        struct tcf_chain *goto_ch = NULL;
+       u32 psample_group_num, rate;
        struct tc_sample *parm;
-       u32 psample_group_num;
        struct tcf_sample *s;
        bool exists = false;
        int ret, err;
@@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto release_idr;
 
+       rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       if (!rate) {
+               NL_SET_ERR_MSG(extack, "invalid sample rate");
+               err = -EINVAL;
+               goto put_chain;
+       }
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
@@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        spin_lock_bh(&s->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
-       s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       s->rate = rate;
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
 
index 459921bd3d87b5a563d3725015f8765b20566aa0..a13bc351a4148f40f434b25a9adffc4cf9137548 100644 (file)
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+       struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+       if (head && head->handle == handle)
+               return head;
+
        return NULL;
 }
 
index acc9b9da985f81ffd9b485e082cf1781e6731ba2..259d97bc2abd39df8df646c2ebc34ea272e1fd70 100644 (file)
@@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+       int wlen = skb_network_offset(skb);
        u8 dscp;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
+               wlen += sizeof(struct iphdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
                return dscp;
 
        case htons(ETH_P_IPV6):
+               wlen += sizeof(struct ipv6hdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
index 4dc05409e3fb2742c1af9467aae5d1bf221b7101..114b9048ea7e3682106c6e65644d4d0992e20461 100644 (file)
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
+       __u32 qlen;
 
        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
+       qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
 
        sch_tree_lock(sch);
 
-       qlen = cl->q->q.qlen;
-       backlog = cl->q->qstats.backlog;
-       qdisc_reset(cl->q);
-       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+       qdisc_purge_queue(cl->q);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
index 09b8009910657ace91e838eccfa520c81d800750..430df9a55ec4e9742786fb869ab3acf28e84f5ed 100644 (file)
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       drr_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
 {
        struct drr_class *cl = (struct drr_class *)arg;
-       __u32 qlen = cl->qdisc->q.qlen;
+       __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+       struct Qdisc *cl_q = cl->qdisc;
        struct tc_drr_stats xstats;
 
        memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+           gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 24cc220a3218aee4f6c44ed271050c0b8d137ec9..d2ab463f22ae8b122ae43d1969cf795fb11c05b3 100644 (file)
@@ -844,16 +844,6 @@ qdisc_peek_len(struct Qdisc *sch)
        return len;
 }
 
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
-               hfsc_purge_queue(sch, parent);
+               qdisc_purge_queue(parent->qdisc);
        hfsc_adjust_levels(parent);
        sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);
 
-       hfsc_purge_queue(sch, cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
        sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;
+       __u32 qlen;
 
-       cl->qstats.backlog = cl->qdisc->qstats.backlog;
+       qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 30f9da7e1076368f2b0718d2bb0e1e3c5432998c..2f9883b196e8e6b10abd9b623b6285274a003ff6 100644 (file)
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
        };
        __u32 qlen = 0;
 
-       if (!cl->level && cl->leaf.q) {
-               qlen = cl->leaf.q->q.qlen;
-               qs.backlog = cl->leaf.q->qstats.backlog;
-       }
+       if (!cl->level && cl->leaf.q)
+               qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                    INT_MIN, INT_MAX);
        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       if (!cl->level) {
-               unsigned int qlen = cl->leaf.q->q.qlen;
-               unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-               qdisc_reset(cl->leaf.q);
-               qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-       }
+       if (!cl->level)
+               qdisc_purge_queue(cl->leaf.q);
 
        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                                          classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
-                       unsigned int qlen = parent->leaf.q->q.qlen;
-                       unsigned int backlog = parent->leaf.q->qstats.backlog;
-
                        /* turn parent into inner node */
-                       qdisc_reset(parent->leaf.q);
-                       qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+                       qdisc_purge_queue(parent->leaf.q);
                        qdisc_put(parent->leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
index 203659bc3906419f6a00edca96561efb503d608d..3a3312467692c4f17bc03a78299322ac9a67250c 100644 (file)
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index d364e63c396d78fe8866b9a8d7aa1ec9b281814e..ea0dc112b38dd4ac43d0fdc15f4742583c380991 100644 (file)
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
-                   gnet_stats_copy_queue(d, NULL,
-                                         &sch->qstats, sch->q.qlen) < 0)
+                   qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
index 7410ce4d03213d315696ec933722edbc6c542f2a..35b03ae08e0f1f8afbd10f6f4c1d6078e22a48d1 100644 (file)
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
        for (i = q->bands; i < q->max_bands; i++) {
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
+
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                                 child->qstats.backlog);
+                       qdisc_tree_flush_backlog(child);
                        qdisc_put(child);
                }
        }
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
                                        qdisc_hash_add(child, true);
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
+                                       qdisc_tree_flush_backlog(old);
                                        qdisc_put(old);
                                }
                                sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 847141cd900f1933f0b48684085c747f06c092c1..d519b21535b36b1f163460593573cb018cd1a904 100644 (file)
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < oldbands; i++) {
-               struct Qdisc *child = q->queues[i];
-
-               qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                         child->qstats.backlog);
-       }
+       for (i = q->bands; i < oldbands; i++)
+               qdisc_tree_flush_backlog(q->queues[i]);
 
        for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 29f5c4a2468829457ddf734aa1e7711ebfe4bcc8..1589364b54da11dc241212dee190dad741d9d9bc 100644 (file)
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       qfq_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL,
-                                 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl->qdisc) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 9df9942340eaaa30ed38fc3345649f287a373bee..4e8c0abf619459f396b91fc587271b7938e48330 100644 (file)
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                old_child = q->qdisc;
                q->qdisc = child;
        }
index bab506b01a32950d2ac07ff04815424705c62503..2419fdb759667a5c124f2018a310aabe9318b257 100644 (file)
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
                qdisc_hash_add(child, true);
        sch_tree_lock(sch);
 
-       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                 q->qdisc->qstats.backlog);
+       qdisc_tree_flush_backlog(q->qdisc);
        qdisc_put(q->qdisc);
        q->qdisc = child;
 
index 206e4dbed12f0e08a2c8782fedac1247995001bb..c7041999eb5d348e7520dab451205fda7a72d22b 100644 (file)
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index 7f272a9070c5753e61dd140eca77afe4d17d6692..f71578dbb9e39292329e06d98535c764043acd55 100644 (file)
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                qdisc_put(q->qdisc);
                q->qdisc = child;
        }
index 6abc8b274270730e482730bbc3ef735a7ffd2e52..951afdeea5e92c7cab48f53482d307c3f9893d89 100644 (file)
@@ -600,6 +600,7 @@ out:
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
        /* No address mapping for V4 sockets */
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
        return sizeof(struct sockaddr_in);
 }
 
index 9874e60c9b0d00924042c1b377bc0c777edfc4cb..4583fa914e62aedaf2ef29c5cf668f0caee4eade 100644 (file)
@@ -4847,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
        }
 
        /* Validate addr_len before calling common connect/connectx routine. */
-       af = sctp_get_af_specific(addr->sa_family);
+       af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+               sctp_get_af_specific(addr->sa_family);
        if (!af || addr_len < af->sockaddr_len) {
                err = -EINVAL;
        } else {
index 77ef53596d18c5fd091b6888efbc8b35063087a8..6f869ef49b3226806ab7f7973821870d77618004 100644 (file)
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock)
 
        if (sk->sk_state == SMC_CLOSED) {
                if (smc->clcsock) {
-                       mutex_lock(&smc->clcsock_release_lock);
-                       sock_release(smc->clcsock);
-                       smc->clcsock = NULL;
-                       mutex_unlock(&smc->clcsock_release_lock);
+                       release_sock(sk);
+                       smc_clcsock_release(smc);
+                       lock_sock(sk);
                }
                if (!smc->use_fallback)
                        smc_conn_free(&smc->conn);
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link,
        link->peer_mtu = clc->qp_mtu;
 }
 
+static void smc_switch_to_fallback(struct smc_sock *smc)
+{
+       smc->use_fallback = true;
+       if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
+               smc->clcsock->file = smc->sk.sk_socket->file;
+               smc->clcsock->file->private_data = smc->clcsock;
+       }
+}
+
 /* fall back during connect */
 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 {
-       smc->use_fallback = true;
+       smc_switch_to_fallback(smc);
        smc->fallback_rsn = reason_code;
        smc_copy_sock_settings_to_clc(smc);
        if (smc->sk.sk_state == SMC_INIT)
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       if (smc->sk.sk_err)
-               smc->sk.sk_state_change(&smc->sk);
-       else
-               smc->sk.sk_write_space(&smc->sk);
+       if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+               if (smc->sk.sk_err) {
+                       smc->sk.sk_state_change(&smc->sk);
+               } else { /* allow polling before and after fallback decision */
+                       smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
+                       smc->sk.sk_write_space(&smc->sk);
+               }
+       }
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        if  (rc < 0)
                lsk->sk_err = -rc;
        if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
+               new_sk->sk_prot->unhash(new_sk);
                if (new_clcsock)
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
                sock_set_flag(new_sk, SOCK_DEAD);
-               new_sk->sk_prot->unhash(new_sk);
                sock_put(new_sk); /* final */
                *new_smc = NULL;
                goto out;
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
+                       new_sk->sk_prot->unhash(new_sk);
                        if (isk->clcsock) {
                                sock_release(isk->clcsock);
                                isk->clcsock = NULL;
                        }
-                       new_sk->sk_prot->unhash(new_sk);
                        sock_put(new_sk); /* final */
                        continue;
                }
-               if (new_sock)
+               if (new_sock) {
                        sock_graft(new_sk, new_sock);
+                       if (isk->use_fallback) {
+                               smc_sk(new_sk)->clcsock->file = new_sock->file;
+                               isk->clcsock->file->private_data = isk->clcsock;
+                       }
+               }
                return new_sk;
        }
        return NULL;
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk)
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
+       sk->sk_prot->unhash(sk);
        if (smc->clcsock) {
                struct socket *tcp;
 
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk)
                        smc_conn_free(&smc->conn);
        }
        release_sock(sk);
-       sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
 }
 
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
        struct smc_sock *lsmc = new_smc->listen_smc;
        struct sock *newsmcsk = &new_smc->sk;
 
-       lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
+               lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
                smc_accept_enqueue(&lsmc->sk, newsmcsk);
+               release_sock(&lsmc->sk);
        } else { /* no longer listening */
                smc_close_non_accepted(newsmcsk);
        }
-       release_sock(&lsmc->sk);
 
        /* Wake up accept */
        lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
                return;
        }
        smc_conn_free(&new_smc->conn);
-       new_smc->use_fallback = true;
+       smc_switch_to_fallback(new_smc);
        new_smc->fallback_rsn = reason_code;
        if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
                if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work)
        int rc = 0;
        u8 ibport;
 
+       if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+               return smc_listen_out_err(new_smc);
+
        if (new_smc->use_fallback) {
                smc_listen_out_connected(new_smc);
                return;
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work)
 
        /* check if peer is smc capable */
        if (!tcp_sk(newclcsock->sk)->syn_smc) {
-               new_smc->use_fallback = true;
+               smc_switch_to_fallback(new_smc);
                new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
                smc_listen_out_connected(new_smc);
                return;
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (msg->msg_flags & MSG_FASTOPEN) {
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        rc = -EINVAL;
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        if (!smc->use_fallback)
index 2ad37e998509310f210f4e3654cc054487731e87..fc06720b53c1442a8dd3222ed7be482a8993ab92 100644 (file)
 
 #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME     (5 * HZ)
 
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+       struct socket *tcp;
+
+       if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+               cancel_work_sync(&smc->smc_listen_work);
+       mutex_lock(&smc->clcsock_release_lock);
+       if (smc->clcsock) {
+               tcp = smc->clcsock;
+               smc->clcsock = NULL;
+               sock_release(tcp);
+       }
+       mutex_unlock(&smc->clcsock_release_lock);
+}
+
 static void smc_close_cleanup_listen(struct sock *parent)
 {
        struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
                                                   close_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_cdc_conn_state_flags *rxflags;
+       bool release_clcsock = false;
        struct sock *sk = &smc->sk;
        int old_state;
 
@@ -400,13 +417,13 @@ wakeup:
                if ((sk->sk_state == SMC_CLOSED) &&
                    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
                        smc_conn_free(conn);
-                       if (smc->clcsock) {
-                               sock_release(smc->clcsock);
-                               smc->clcsock = NULL;
-                       }
+                       if (smc->clcsock)
+                               release_clcsock = true;
                }
        }
        release_sock(sk);
+       if (release_clcsock)
+               smc_clcsock_release(smc);
        sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
 
index 19eb6a211c23cd12fad8f5077a26209bb05c3d33..e0e3b5df25d2474b8aadd2e7639d07e0c8c631ef 100644 (file)
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
+void smc_clcsock_release(struct smc_sock *smc);
 
 #endif /* SMC_CLOSE_H */
index 2fff79db1a59ce3d2908722941dd9355810c65a0..e89e918b88e09acaad980da8dc34e3d921fe69be 100644 (file)
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
        INIT_LIST_HEAD(&smcd->vlan);
        smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
                                                 WQ_MEM_RECLAIM, name);
+       if (!smcd->event_wq) {
+               kfree(smcd->conn);
+               kfree(smcd);
+               return NULL;
+       }
        return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);
index 8d2f6296279c96827e332153ff274f522a3cb689..0285c7f9e79b6edb6a288be8bb50092a55bc7cfb 100644 (file)
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
 
-       return smc_pnet_remove_by_pnetid(net, NULL);
+       smc_pnet_remove_by_pnetid(net, NULL);
+       return 0;
 }
 
 /* SMC_PNETID generic netlink operation definition */
index 860dcfb95ee472fed5d74e6015af2acce178c0a7..fa6c977b4c41a4a0b8deeb99c3e5d0d03c55de2b 100644 (file)
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        /* We are going to append to the frags_list of head.
                         * Need to unshare the frag_list.
                         */
-                       if (skb_has_frag_list(head)) {
-                               err = skb_unclone(head, GFP_ATOMIC);
-                               if (err) {
-                                       STRP_STATS_INCR(strp->stats.mem_fail);
-                                       desc->error = err;
-                                       return 0;
-                               }
+                       err = skb_unclone(head, GFP_ATOMIC);
+                       if (err) {
+                               STRP_STATS_INCR(strp->stats.mem_fail);
+                               desc->error = err;
+                               return 0;
                        }
 
                        if (unlikely(skb_shinfo(head)->frag_list)) {
index 187d10443a1584e196245afc9837add06daa1c86..8ff11dc98d7f93fefeff6ecc53ff6d7815da47f8 100644 (file)
@@ -1540,7 +1540,6 @@ call_start(struct rpc_task *task)
        clnt->cl_stats->rpccnt++;
        task->tk_action = call_reserve;
        rpc_task_set_transport(task, clnt);
-       call_reserve(task);
 }
 
 /*
@@ -1554,9 +1553,6 @@ call_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-        call_reserveresult(task);
 }
 
 static void call_retry_reserve(struct rpc_task *task);
@@ -1579,7 +1575,6 @@ call_reserveresult(struct rpc_task *task)
        if (status >= 0) {
                if (task->tk_rqstp) {
                        task->tk_action = call_refresh;
-                       call_refresh(task);
                        return;
                }
 
@@ -1605,7 +1600,6 @@ call_reserveresult(struct rpc_task *task)
                /* fall through */
        case -EAGAIN:   /* woken up; retry */
                task->tk_action = call_retry_reserve;
-               call_retry_reserve(task);
                return;
        case -EIO:      /* probably a shutdown */
                break;
@@ -1628,9 +1622,6 @@ call_retry_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_retry_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_reserveresult(task);
 }
 
 /*
@@ -1645,9 +1636,6 @@ call_refresh(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_refreshresult(task);
 }
 
 /*
@@ -1666,7 +1654,6 @@ call_refreshresult(struct rpc_task *task)
        case 0:
                if (rpcauth_uptodatecred(task)) {
                        task->tk_action = call_allocate;
-                       call_allocate(task);
                        return;
                }
                /* Use rate-limiting and a max number of retries if refresh
@@ -1685,7 +1672,6 @@ call_refreshresult(struct rpc_task *task)
                task->tk_cred_retry--;
                dprintk("RPC: %5u %s: retry refresh creds\n",
                                task->tk_pid, __func__);
-               call_refresh(task);
                return;
        }
        dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
@@ -1711,10 +1697,8 @@ call_allocate(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_action = call_encode;
 
-       if (req->rq_buffer) {
-               call_encode(task);
+       if (req->rq_buffer)
                return;
-       }
 
        if (proc->p_proc != 0) {
                BUG_ON(proc->p_arglen == 0);
@@ -1740,12 +1724,8 @@ call_allocate(struct rpc_task *task)
 
        status = xprt->ops->buf_alloc(task);
        xprt_inject_disconnect(xprt);
-       if (status == 0) {
-               if (rpc_task_need_resched(task))
-                       return;
-               call_encode(task);
+       if (status == 0)
                return;
-       }
        if (status != -ENOMEM) {
                rpc_exit(task, status);
                return;
@@ -1828,8 +1808,12 @@ call_encode(struct rpc_task *task)
                xprt_request_enqueue_receive(task);
        xprt_request_enqueue_transmit(task);
 out:
-       task->tk_action = call_bind;
-       call_bind(task);
+       task->tk_action = call_transmit;
+       /* Check that the connection is OK */
+       if (!xprt_bound(task->tk_xprt))
+               task->tk_action = call_bind;
+       else if (!xprt_connected(task->tk_xprt))
+               task->tk_action = call_connect;
 }
 
 /*
@@ -1847,7 +1831,6 @@ rpc_task_handle_transmitted(struct rpc_task *task)
 {
        xprt_end_transmit(task);
        task->tk_action = call_transmit_status;
-       call_transmit_status(task);
 }
 
 /*
@@ -1865,7 +1848,6 @@ call_bind(struct rpc_task *task)
 
        if (xprt_bound(xprt)) {
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1896,7 +1878,6 @@ call_bind_status(struct rpc_task *task)
                dprint_status(task);
                task->tk_status = 0;
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1981,7 +1962,6 @@ call_connect(struct rpc_task *task)
 
        if (xprt_connected(xprt)) {
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
 
@@ -2051,7 +2031,6 @@ call_connect_status(struct rpc_task *task)
        case 0:
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
        rpc_exit(task, status);
@@ -2087,9 +2066,6 @@ call_transmit(struct rpc_task *task)
                xprt_transmit(task);
        }
        xprt_end_transmit(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_transmit_status(task);
 }
 
 /*
@@ -2105,11 +2081,8 @@ call_transmit_status(struct rpc_task *task)
         * test first.
         */
        if (rpc_task_transmitted(task)) {
-               if (task->tk_status == 0)
-                       xprt_request_wait_receive(task);
-               if (rpc_task_need_resched(task))
-                       return;
-               call_status(task);
+               task->tk_status = 0;
+               xprt_request_wait_receive(task);
                return;
        }
 
@@ -2170,7 +2143,6 @@ call_bc_encode(struct rpc_task *task)
 {
        xprt_request_enqueue_transmit(task);
        task->tk_action = call_bc_transmit;
-       call_bc_transmit(task);
 }
 
 /*
@@ -2195,6 +2167,9 @@ call_bc_transmit_status(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
+       if (rpc_task_transmitted(task))
+               task->tk_status = 0;
+
        dprint_status(task);
 
        switch (task->tk_status) {
@@ -2261,7 +2236,6 @@ call_status(struct rpc_task *task)
        status = task->tk_status;
        if (status >= 0) {
                task->tk_action = call_decode;
-               call_decode(task);
                return;
        }
 
index 89a63391d4d442f6d390556aa8cf0b5a2a41357a..30cfc0efe6990aa5f693b56c26c257bd968f43e5 100644 (file)
@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
-       ib_drain_qp(ia->ri_id->qp);
+       ib_drain_rq(ia->ri_id->qp);
        drain_workqueue(buf->rb_completion_wq);
 
        /* Deferred Reply processing might have scheduled
index 341ecd796aa473d35e770d4dfbf413ee3bcdc1cf..131aa2f0fd27c46e14f024b317dd65c786b0bea4 100644 (file)
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_head_init(&list);
 
        l->in_session = false;
+       /* Force re-synch of peer session number before establishing */
+       l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;
 
index bff241f0352501aba8605622df16f2c85044c09b..89993afe0fbd38713dd3d0499cc79e6c3e159b4d 100644 (file)
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
        for (; i < TIPC_NAMETBL_SIZE; i++) {
                head = &tn->nametbl->services[i];
 
-               if (*last_type) {
+               if (*last_type ||
+                   (!i && *last_key && (*last_lower == *last_key))) {
                        service = tipc_service_find(net, *last_type);
                        if (!service)
                                return -EPIPE;
index 4ad3586da8f028c0fb8244382b343a1c7635f6cb..340a6e7c43a7d39c596de4f1b0045e4200edc2a3 100644 (file)
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        if (msg->rep_type)
                tipc_tlv_init(msg->rep, msg->rep_type);
 
-       if (cmd->header)
-               (*cmd->header)(msg);
+       if (cmd->header) {
+               err = (*cmd->header)(msg);
+               if (err) {
+                       kfree_skb(msg->rep);
+                       msg->rep = NULL;
+                       return err;
+               }
+       }
 
        arg = nlmsg_new(0, GFP_KERNEL);
        if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
        if (!bearer)
                return -EMSGSIZE;
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_bearer_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_BEARER_NAME);
        if (!string_is_valid(b->name, len))
                return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_link_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_LINK_NAME);
        if (!string_is_valid(lc->name, len))
                return -EINVAL;
 
index 3481e4906bd6a4a3e1f27ec5d49106090c7ec7f1..9df82a573aa7768f583999e740022ce00295bbd4 100644 (file)
@@ -38,6 +38,8 @@
 
 #include <linux/sysctl.h>
 
+static int zero;
+static int one = 1;
 static struct ctl_table_header *tipc_ctl_hdr;
 
 static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
                .data           = &sysctl_tipc_rmem,
                .maxlen         = sizeof(sysctl_tipc_rmem),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
        {
                .procname       = "named_timeout",
                .data           = &sysctl_tipc_named_timeout,
                .maxlen         = sizeof(sysctl_tipc_named_timeout),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "sk_filter",
index 135a7ee9db034149252f8df3a56f7834ff573eab..9f3bdbc1e59348cf049c0cca9959ee5f413e41cb 100644 (file)
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-       if (ctx->tx_conf == TLS_HW)
+       if (ctx->tx_conf == TLS_HW) {
                kfree(tls_offload_ctx_tx(ctx));
+               kfree(ctx->tx.rec_seq);
+               kfree(ctx->tx.iv);
+       }
 
        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+       tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
index df921a2904b9b5b96acab53e52fa66090a900660..9547cea0ce3b078b4ca79fec7b24232b046b74c6 100644 (file)
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
        return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+       struct scatterlist *sg;
+
+       sg = ctx->partially_sent_record;
+       if (!sg)
+               return false;
+
+       while (1) {
+               put_page(sg_page(sg));
+               sk_mem_uncharge(sk, sg->length);
+
+               if (sg_is_last(sg))
+                       break;
+               sg++;
+       }
+       ctx->partially_sent_record = NULL;
+       return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+       } else if (ctx->tx_conf == TLS_HW) {
+               tls_device_free_resources_tx(sk);
+#endif
        }
 
        if (ctx->rx_conf == TLS_SW) {
index 425351ac2a9b156aacf9234e566d7c5ba0dc5867..b50ced862f6f9a9f9d959950dcee4adbdd59cb6e 100644 (file)
@@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
                                return err;
                        }
+               } else {
+                       *zc = false;
                }
 
                rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@ -2050,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
        /* Free up un-sent records in tx_list. First, free
         * the partially sent record if any at head of tx_list.
         */
-       if (tls_ctx->partially_sent_record) {
-               struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-               while (1) {
-                       put_page(sg_page(sg));
-                       sk_mem_uncharge(sk, sg->length);
-
-                       if (sg_is_last(sg))
-                               break;
-                       sg++;
-               }
-
-               tls_ctx->partially_sent_record = NULL;
-
+       if (tls_free_partial_record(sk, tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
                list_del(&rec->list);
index 25a9e3b5c1542a71fff0f4f2ab8166f977786f8c..47e30a58566c2817696655212a8da4c5fc00f00e 100644 (file)
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WIPHY |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_pmk,
                .policy = nl80211_policy,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMK,
index 2f1bf91eb2265a26bcebeeb3589735e77a3a9daa..0ba778f371cb25fe6b610dc6e112beed2db55e9b 100644 (file)
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
        return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+                                   const struct ieee80211_wmm_ac *wmm_ac2,
+                                   struct ieee80211_wmm_ac *intersect)
+{
+       intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+       intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+       intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+       intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        struct ieee80211_freq_range *freq_range;
        const struct ieee80211_power_rule *power_rule1, *power_rule2;
        struct ieee80211_power_rule *power_rule;
+       const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+       struct ieee80211_wmm_rule *wmm_rule;
        u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
        freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        power_rule2 = &rule2->power_rule;
        power_rule = &intersected_rule->power_rule;
 
+       wmm_rule1 = &rule1->wmm_rule;
+       wmm_rule2 = &rule2->wmm_rule;
+       wmm_rule = &intersected_rule->wmm_rule;
+
        freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
                                         freq_range2->start_freq_khz);
        freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
                                           rule2->dfs_cac_ms);
 
+       if (rule1->has_wmm && rule2->has_wmm) {
+               u8 ac;
+
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+                       reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+                                               &wmm_rule2->client[ac],
+                                               &wmm_rule->client[ac]);
+                       reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+                                               &wmm_rule2->ap[ac],
+                                               &wmm_rule->ap[ac]);
+               }
+
+               intersected_rule->has_wmm = true;
+       } else if (rule1->has_wmm) {
+               *wmm_rule = *wmm_rule1;
+               intersected_rule->has_wmm = true;
+       } else if (rule2->has_wmm) {
+               *wmm_rule = *wmm_rule2;
+               intersected_rule->has_wmm = true;
+       } else {
+               intersected_rule->has_wmm = false;
+       }
+
        if (!is_valid_reg_rule(intersected_rule))
                return -EINVAL;
 
index 287518c6caa40204525993d8b2477e269324f378..04d888628f29dcca952d38d48b785d5e2c56dfef 100644 (file)
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
        /* copy subelement as we need to change its content to
         * mark an ie after it is processed.
         */
-       sub_copy = kmalloc(subie_len, gfp);
+       sub_copy = kmemdup(subelement, subie_len, gfp);
        if (!sub_copy)
                return 0;
-       memcpy(sub_copy, subelement, subie_len);
 
        pos = &new_ie[0];
 
index e4b8db5e81ec710db0ee8e779046e04263441e08..75899b62bdc9ed2116a1420a6035c904307f4838 100644 (file)
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        else if (rate->bw == RATE_INFO_BW_HE_RU &&
                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
                result = rates_26[rate->he_gi];
-       else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
-                     rate->bw, rate->he_ru_alloc))
+       else {
+               WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+                    rate->bw, rate->he_ru_alloc);
                return 0;
+       }
 
        /* now scale to the appropriate MCS */
        tmp = result;
index 27400b0cd732e2f37733e321dee5742c8cdf6721..000dc6437893baa133224c1d9922e97433c573f4 100644 (file)
@@ -13,7 +13,7 @@ gen-atomic-long.sh              asm-generic/atomic-long.h
 gen-atomic-fallback.sh          linux/atomic-fallback.h
 EOF
 while read script header; do
-       ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+       /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
        HASH="$(sha1sum ${LINUXDIR}/include/${header})"
        HASH="${HASH%% *}"
        printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644 (file)
index 0000000..350145d
--- /dev/null
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that are using nonseekable_open and convert
+// them to stream_open. A stream-like file is a file that does not use ppos in
+// its read and write. Rationale for the conversion is to avoid deadlock in
+// between read and write.
+
+virtual report
+virtual patch
+virtual explain  // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  block_f(...) {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait())
+// XXX currently reader_blocks supports only direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  readstream(...)
+  {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+  readstream(...)
+  {
+    ... when exists
+    block_f(...)
+    ... when exists
+  }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+  readstream(...) {
+    ...
+  }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ...    (sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+  struct file_operations fops = {
+    ...
+  };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+  struct file_operations fops = {
+    .read = read_f,
+  };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+  struct file_operations fops = {
+    .read_iter = read_iter_f,
+  };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+  struct file_operations fops = {
+    .write = write_f,
+  };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+  struct file_operations fops = {
+    .write_iter = write_iter_f,
+  };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+  struct file_operations fops = {
+    .llseek = llseek_f,
+  };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+    .llseek = no_llseek,
+  };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+  struct file_operations fops = {
+    .mmap = mmap_f,
+  };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+  struct file_operations fops = {
+    .copy_file_range = copy_file_range_f,
+  };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+  struct file_operations fops = {
+    .remap_file_range = remap_file_range_f,
+  };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+  struct file_operations fops = {
+    .splice_read = splice_read_f,
+  };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+  struct file_operations fops = {
+    .splice_write = splice_write_f,
+  };
+
+
+// file_operations that is candidate for stream_open conversion - it does not
+// use mmap and other methods that assume @offset access to file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_steam.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+  };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener -
+// under that conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+      .write = writestream,
+  };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+     nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (was deadlock) */
+    ...>
+  }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (no direct deadlock) */
+    ...>
+  }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+  };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* read only */
+    ...>
+  }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .write = writestream,
+  };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* write only */
+    ...>
+  }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// no read, no write - don't change anything
index 49d664ddff444810ef9c6e8a1b0276c5ba473c53..87500bde5a92d599ccaa4e892ff12527feac58eb 100644 (file)
@@ -1336,9 +1336,16 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
 bool aa_g_paranoid_load = true;
 module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
 
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
+#define param_check_aaintbool param_check_int
+static const struct kernel_param_ops param_ops_aaintbool = {
+       .set = param_set_aaintbool,
+       .get = param_get_aaintbool
+};
 /* Boot time disable flag */
 static int apparmor_enabled __lsm_ro_after_init = 1;
-module_param_named(enabled, apparmor_enabled, int, 0444);
+module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
 
 static int __init apparmor_enabled_setup(char *str)
 {
@@ -1413,6 +1420,46 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
        return param_get_uint(buffer, kp);
 }
 
+/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+       int error;
+
+       if (apparmor_initialized)
+               return -EPERM;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       error = param_set_bool(val, &kp_local);
+       if (!error)
+               *((int *)kp->arg) = *((bool *)kp_local.arg);
+       return error;
+}
+
+/*
+ * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
+ * 1/0, this converts the "int that is actually bool" back to bool for
+ * display in the /sys filesystem, while keeping it "int" for the LSM
+ * infrastructure.
+ */
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       return param_get_bool(buffer, &kp_local);
+}
+
 static int param_get_audit(char *buffer, const struct kernel_param *kp)
 {
        if (!apparmor_enabled)
index cd97929fac663f61250edeae3397e3ab75b5ff49..dc28914fa72e076405b238225dc54c172df5de40 100644 (file)
@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
                    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
                        rc = dev_exception_add(devcg, ex);
                        if (rc)
-                               break;
+                               return rc;
                } else {
                        /*
                         * in the other possible cases:
index bcc9c6ead7fd30962cf2ac3755e61d011d62c3da..efdbf17f3915259ea34ed76f5d452a4763236043 100644 (file)
@@ -125,7 +125,7 @@ out:
  */
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...)
+                       unsigned char *h2, unsigned int h3, ...)
 {
        unsigned char paramdigest[SHA1_DIGEST_SIZE];
        struct sdesc *sdesc;
@@ -135,13 +135,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
        int ret;
        va_list argp;
 
+       if (!chip)
+               return -ENODEV;
+
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
                pr_info("trusted_key: can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
 
-       c = h3;
+       c = !!h3;
        ret = crypto_shash_init(&sdesc->shash);
        if (ret < 0)
                goto out;
@@ -196,6 +199,9 @@ int TSS_checkhmac1(unsigned char *buffer,
        va_list argp;
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
        tag = LOAD16(buffer, 0);
        ordinal = command;
@@ -363,6 +369,9 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen)
 {
        int rc;
 
+       if (!chip)
+               return -ENODEV;
+
        dump_tpm_buf(cmd);
        rc = tpm_send(chip, cmd, buflen);
        dump_tpm_buf(cmd);
@@ -429,6 +438,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
 {
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        INIT_BUF(tb);
        store16(tb, TPM_TAG_RQU_COMMAND);
        store32(tb, TPM_OIAP_SIZE);
@@ -1245,9 +1257,13 @@ static int __init init_trusted(void)
 {
        int ret;
 
+       /* encrypted_keys.ko depends on successful load of this module even if
+        * TPM is not used.
+        */
        chip = tpm_default_chip();
        if (!chip)
-               return -ENOENT;
+               return 0;
+
        ret = init_digests();
        if (ret < 0)
                goto err_put;
@@ -1269,10 +1285,12 @@ err_put:
 
 static void __exit cleanup_trusted(void)
 {
-       put_device(&chip->dev);
-       kfree(digests);
-       trusted_shash_release();
-       unregister_key_type(&key_type_trusted);
+       if (chip) {
+               put_device(&chip->dev);
+               kfree(digests);
+               trusted_shash_release();
+               unregister_key_type(&key_type_trusted);
+       }
 }
 
 late_initcall(init_trusted);
index 96a074019c33c28b5587d7d833110eced98669c0..0eb169acc85031f5f2a5bdc8e3e7b9b3b66a97b7 100644 (file)
@@ -713,8 +713,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent,
        INIT_LIST_HEAD(&entry->list);
        entry->parent = parent;
        entry->module = module;
-       if (parent)
+       if (parent) {
+               mutex_lock(&parent->access);
                list_add_tail(&entry->list, &parent->children);
+               mutex_unlock(&parent->access);
+       }
        return entry;
 }
 
@@ -792,7 +795,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
        list_for_each_entry_safe(p, n, &entry->children, list)
                snd_info_free_entry(p);
 
-       list_del(&entry->list);
+       p = entry->parent;
+       if (p) {
+               mutex_lock(&p->access);
+               list_del(&entry->list);
+               mutex_unlock(&p->access);
+       }
        kfree(entry->name);
        if (entry->private_free)
                entry->private_free(entry);
index 0c4dc40376a709ff2e8aabd2f9ac4d25660389be..079c12d64b0e3112361ab2a4497df41155aa7f62 100644 (file)
@@ -382,14 +382,7 @@ int snd_card_disconnect(struct snd_card *card)
        card->shutdown = 1;
        spin_unlock(&card->files_lock);
 
-       /* phase 1: disable fops (user space) operations for ALSA API */
-       mutex_lock(&snd_card_mutex);
-       snd_cards[card->number] = NULL;
-       clear_bit(card->number, snd_cards_lock);
-       mutex_unlock(&snd_card_mutex);
-       
-       /* phase 2: replace file->f_op with special dummy operations */
-       
+       /* replace file->f_op with special dummy operations */
        spin_lock(&card->files_lock);
        list_for_each_entry(mfile, &card->files_list, list) {
                /* it's critical part, use endless loop */
@@ -405,7 +398,7 @@ int snd_card_disconnect(struct snd_card *card)
        }
        spin_unlock(&card->files_lock); 
 
-       /* phase 3: notify all connected devices about disconnection */
+       /* notify all connected devices about disconnection */
        /* at this point, they cannot respond to any calls except release() */
 
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -421,6 +414,13 @@ int snd_card_disconnect(struct snd_card *card)
                device_del(&card->card_dev);
                card->registered = false;
        }
+
+       /* disable fops (user space) operations for ALSA API */
+       mutex_lock(&snd_card_mutex);
+       snd_cards[card->number] = NULL;
+       clear_bit(card->number, snd_cards_lock);
+       mutex_unlock(&snd_card_mutex);
+
 #ifdef CONFIG_PM
        wake_up(&card->power_sleep);
 #endif
index 7d4640d1fe9fb8a8ab8eecf045798497cb3e38f3..38e7deab638479ef9525c67fcd4f0fe37101cfb2 100644 (file)
@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
 
        /* fill the info fields */
        if (client_info->name[0])
-               strlcpy(client->name, client_info->name, sizeof(client->name));
+               strscpy(client->name, client_info->name, sizeof(client->name));
 
        client->filter = client_info->filter;
        client->event_lost = client_info->event_lost;
@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
        /* set queue name */
        if (!info->name[0])
                snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        snd_use_lock_free(&q->use_lock);
 
        return 0;
@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
                queuefree(q);
                return -EPERM;
        }
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        queuefree(q);
 
        return 0;
index 9c37d9af3023f67bdba2ba4a3d207580cd1f70c5..ec7715c6b0c02c9bc940ed4b16f387b509bd1907 100644 (file)
@@ -107,7 +107,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_LIST_HEAD(&bus->hlink_list);
        bus->idx = idx++;
 
-       mutex_init(&bus->lock);
        bus->cmd_dma_state = true;
 
        return 0;
index 012305177f68227af7bb25890a92c1ef93690221..ad8eee08013fb838e228daaa23cd974a92c34325 100644 (file)
@@ -38,6 +38,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
        spin_lock_init(&bus->reg_lock);
        mutex_init(&bus->cmd_mutex);
+       mutex_init(&bus->lock);
        bus->irq = -1;
        return 0;
 }
index 5c95933e739a43bc5cd30829e43c0381cff1e989..1ea51e3b942a034a1b487bb2ad7dc054893a4d39 100644 (file)
@@ -69,13 +69,15 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
 
        dev_dbg(bus->dev, "display power %s\n",
                enable ? "enable" : "disable");
+
+       mutex_lock(&bus->lock);
        if (enable)
                set_bit(idx, &bus->display_power_status);
        else
                clear_bit(idx, &bus->display_power_status);
 
        if (!acomp || !acomp->ops)
-               return;
+               goto unlock;
 
        if (bus->display_power_status) {
                if (!bus->display_power_active) {
@@ -92,6 +94,8 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
                        bus->display_power_active = false;
                }
        }
+ unlock:
+       mutex_unlock(&bus->lock);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
 
index ec0b8595eb4da448a51d3376c9913e96f09e3075..701a69d856f5ff7acfb9e264e58abf5cf9f3215e 100644 (file)
@@ -969,6 +969,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
 
        /* power-up all before initialization */
        hda_set_power_state(codec, AC_PWRST_D0);
+       codec->core.dev.power.power_state = PMSG_ON;
 
        snd_hda_codec_proc_new(codec);
 
index ece256a3b48f3b9108615931d8727c86d0d0ded5..2ec91085fa3e7708d27a605747213f2277b9bc2b 100644 (file)
@@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
        SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
        SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
@@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
        SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
        {}
 };
 #endif /* CONFIG_PM */
index a3fb3d4c573090a156bf32534b1eed033f3ade16..f5b510f119edd27d9328011c0dc0d9709c7f74e4 100644 (file)
@@ -1864,8 +1864,8 @@ enum {
        ALC887_FIXUP_BASS_CHMAP,
        ALC1220_FIXUP_GB_DUAL_CODECS,
        ALC1220_FIXUP_CLEVO_P950,
-       ALC1220_FIXUP_SYSTEM76_ORYP5,
-       ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
+       ALC1220_FIXUP_CLEVO_PB51ED,
+       ALC1220_FIXUP_CLEVO_PB51ED_PINS,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
 static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action);
 
-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
                                     const struct hda_fixup *fix,
                                     int action)
 {
@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc1220_fixup_clevo_p950,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc1220_fixup_system76_oryp5,
+               .v.func = alc1220_fixup_clevo_pb51ed,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
                        {}
                },
                .chained = true,
-               .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+               .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
        },
 };
 
@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
-       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
-       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5491,7 +5492,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
        jack->jack->button_state = report;
 }
 
-static void alc295_fixup_chromebook(struct hda_codec *codec,
+static void alc_fixup_headset_jack(struct hda_codec *codec,
                                    const struct hda_fixup *fix, int action)
 {
 
@@ -5501,16 +5502,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
                                                    alc_headset_btn_callback);
                snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
                                      SND_JACK_HEADSET, alc_headset_btn_keymap);
-               switch (codec->core.vendor_id) {
-               case 0x10ec0295:
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
-                       break;
-               case 0x10ec0236:
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
-                       break;
-               }
                break;
        case HDA_FIXUP_ACT_INIT:
                switch (codec->core.vendor_id) {
@@ -5531,6 +5522,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
        }
 }
 
+static void alc295_fixup_chromebook(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       switch (action) {
+       case HDA_FIXUP_ACT_INIT:
+               switch (codec->core.vendor_id) {
+               case 0x10ec0295:
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+                       break;
+               case 0x10ec0236:
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+                       break;
+               }
+               break;
+       }
+}
+
 static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
@@ -5663,6 +5673,7 @@ enum {
        ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
+       ALC233_FIXUP_ACER_HEADSET_MIC,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
        ALC700_FIXUP_INTEL_REFERENCE,
@@ -5684,6 +5695,7 @@ enum {
        ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
        ALC255_FIXUP_ACER_HEADSET_MIC,
        ALC295_FIXUP_CHROME_BOOK,
+       ALC225_FIXUP_HEADSET_JACK,
        ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
@@ -6490,6 +6502,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
        },
+       [ALC233_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
+       },
        [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6635,6 +6657,12 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC295_FIXUP_CHROME_BOOK] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_chromebook,
+               .chained = true,
+               .chain_id = ALC225_FIXUP_HEADSET_JACK
+       },
+       [ALC225_FIXUP_HEADSET_JACK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_jack,
        },
        [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
@@ -6737,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7132,7 +7161,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
-       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
        {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
@@ -7236,6 +7266,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60140},
                {0x14, 0x90170150},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -7346,6 +7378,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
index 419114edfd57db8f341184148fcf4d7ab7d67a0a..667fc1d59e189f599e580654c10719b96b7e74a0 100644 (file)
@@ -1151,6 +1151,7 @@ config SND_SOC_WCD9335
        tristate "WCD9335 Codec"
        depends on SLIMBUS
        select REGMAP_SLIMBUS
+       select REGMAP_IRQ
        help
          The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
          Qualcomm Technologies, Inc. (QTI) multimedia solutions,
index 03bbbcd3b6c115254a75f367c40447f012edc8c4..87616b126018b3cb9b9a5c8f4ca7a3af51b97264 100644 (file)
@@ -2129,6 +2129,7 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                dev_err(dai->component->dev,
                        "%s: ERROR: The device is either a master or a slave.\n",
                        __func__);
+               /* fall through */
        default:
                dev_err(dai->component->dev,
                        "%s: ERROR: Unsupporter master mask 0x%x\n",
index 9f4a59871cee72b2011acf20d8167e12933ec2fc..c71696146c5ec17e17751903a592038c4f8347e4 100644 (file)
@@ -1635,6 +1635,16 @@ err:
        return ret;
 }
 
+static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
+{
+       struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
+
+       regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
+       gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+
+       return 0;
+}
+
 static const struct of_device_id cs35l35_of_match[] = {
        {.compatible = "cirrus,cs35l35"},
        {},
@@ -1655,6 +1665,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
        },
        .id_table = cs35l35_id,
        .probe = cs35l35_i2c_probe,
+       .remove = cs35l35_i2c_remove,
 };
 
 module_i2c_driver(cs35l35_i2c_driver);
index 33d74f163bd753820bb77bd81d8f0a28461410e5..793a14d586672bc2b76b143b5756206becce28a8 100644 (file)
@@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = {
        .reg_defaults =         cs4270_reg_defaults,
        .num_reg_defaults =     ARRAY_SIZE(cs4270_reg_defaults),
        .cache_type =           REGCACHE_RBTREE,
+       .write_flag_mask =      CS4270_I2C_INCR,
 
        .readable_reg =         cs4270_reg_is_readable,
        .volatile_reg =         cs4270_reg_is_volatile,
index ffecdaaa8cf2bb2e69bde2de7e2acb92cbdec0a1..f889d94c8e3cf707f0bcab6cdb7f860851d7042f 100644 (file)
@@ -38,6 +38,9 @@ static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *dai);
 static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai);
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
 static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -50,6 +53,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = {
        .startup = hdac_hda_dai_open,
        .shutdown = hdac_hda_dai_close,
        .prepare = hdac_hda_dai_prepare,
+       .hw_params = hdac_hda_dai_hw_params,
        .hw_free = hdac_hda_dai_hw_free,
        .set_tdm_slot = hdac_hda_dai_set_tdm_slot,
 };
@@ -139,6 +143,39 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct hdac_hda_priv *hda_pvt;
+       unsigned int format_val;
+       unsigned int maxbps;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               maxbps = dai->driver->playback.sig_bits;
+       else
+               maxbps = dai->driver->capture.sig_bits;
+
+       hda_pvt = snd_soc_component_get_drvdata(component);
+       format_val = snd_hdac_calc_stream_format(params_rate(params),
+                                                params_channels(params),
+                                                params_format(params),
+                                                maxbps,
+                                                0);
+       if (!format_val) {
+               dev_err(dai->dev,
+                       "invalid format_val, rate=%d, ch=%d, format=%d, maxbps=%d\n",
+                       params_rate(params), params_channels(params),
+                       params_format(params), maxbps);
+
+               return -EINVAL;
+       }
+
+       hda_pvt->pcm[dai->id].format_val[substream->stream] = format_val;
+       return 0;
+}
+
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
@@ -162,10 +199,9 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        struct snd_soc_component *component = dai->component;
+       struct hda_pcm_stream *hda_stream;
        struct hdac_hda_priv *hda_pvt;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct hdac_device *hdev;
-       struct hda_pcm_stream *hda_stream;
        unsigned int format_val;
        struct hda_pcm *pcm;
        unsigned int stream;
@@ -179,19 +215,8 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
 
        hda_stream = &pcm->stream[substream->stream];
 
-       format_val = snd_hdac_calc_stream_format(runtime->rate,
-                                                runtime->channels,
-                                                runtime->format,
-                                                hda_stream->maxbps,
-                                                0);
-       if (!format_val) {
-               dev_err(&hdev->dev,
-                       "invalid format_val, rate=%d, ch=%d, format=%d\n",
-                       runtime->rate, runtime->channels, runtime->format);
-               return -EINVAL;
-       }
-
        stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+       format_val = hda_pvt->pcm[dai->id].format_val[substream->stream];
 
        ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
                                    stream, format_val, substream);
index e444ef5933606ce8689f06ba187f3e84491d6336..6b1bd4f428e70ed4037adef636ef9c42878d2865 100644 (file)
@@ -8,6 +8,7 @@
 
 struct hdac_hda_pcm {
        int stream_tag[2];
+       unsigned int format_val[2];
 };
 
 struct hdac_hda_priv {
index e5b6769b9797724ceef38f3b8132200876f227e8..35df73e42cbc5f9d7bf6af4aa954bde49e8565e2 100644 (file)
@@ -484,9 +484,6 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
                params_width(params), params_rate(params),
                params_channels(params));
 
-       if (params_width(params) > 24)
-               params->msbits = 24;
-
        ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
                                                       sizeof(hp.iec.status));
        if (ret < 0) {
@@ -529,73 +526,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
 {
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
        struct hdmi_codec_daifmt cf = { 0 };
-       int ret = 0;
 
        dev_dbg(dai->dev, "%s()\n", __func__);
 
-       if (dai->id == DAI_ID_SPDIF) {
-               cf.fmt = HDMI_SPDIF;
-       } else {
-               switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-               case SND_SOC_DAIFMT_CBM_CFM:
-                       cf.bit_clk_master = 1;
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFM:
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBM_CFS:
-                       cf.bit_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFS:
-                       break;
-               default:
-                       return -EINVAL;
-               }
+       if (dai->id == DAI_ID_SPDIF)
+               return 0;
+
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               cf.bit_clk_master = 1;
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFM:
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFS:
+               cf.bit_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               break;
+       default:
+               return -EINVAL;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-               case SND_SOC_DAIFMT_NB_NF:
-                       break;
-               case SND_SOC_DAIFMT_NB_IF:
-                       cf.frame_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_NF:
-                       cf.bit_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_IF:
-                       cf.frame_clk_inv = 1;
-                       cf.bit_clk_inv = 1;
-                       break;
-               }
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_NB_IF:
+               cf.frame_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               cf.bit_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               cf.frame_clk_inv = 1;
+               cf.bit_clk_inv = 1;
+               break;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-               case SND_SOC_DAIFMT_I2S:
-                       cf.fmt = HDMI_I2S;
-                       break;
-               case SND_SOC_DAIFMT_DSP_A:
-                       cf.fmt = HDMI_DSP_A;
-                       break;
-               case SND_SOC_DAIFMT_DSP_B:
-                       cf.fmt = HDMI_DSP_B;
-                       break;
-               case SND_SOC_DAIFMT_RIGHT_J:
-                       cf.fmt = HDMI_RIGHT_J;
-                       break;
-               case SND_SOC_DAIFMT_LEFT_J:
-                       cf.fmt = HDMI_LEFT_J;
-                       break;
-               case SND_SOC_DAIFMT_AC97:
-                       cf.fmt = HDMI_AC97;
-                       break;
-               default:
-                       dev_err(dai->dev, "Invalid DAI interface format\n");
-                       return -EINVAL;
-               }
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               cf.fmt = HDMI_I2S;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               cf.fmt = HDMI_DSP_A;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               cf.fmt = HDMI_DSP_B;
+               break;
+       case SND_SOC_DAIFMT_RIGHT_J:
+               cf.fmt = HDMI_RIGHT_J;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               cf.fmt = HDMI_LEFT_J;
+               break;
+       case SND_SOC_DAIFMT_AC97:
+               cf.fmt = HDMI_AC97;
+               break;
+       default:
+               dev_err(dai->dev, "Invalid DAI interface format\n");
+               return -EINVAL;
        }
 
        hcp->daifmt[dai->id] = cf;
 
-       return ret;
+       return 0;
 }
 
 static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -792,8 +787,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
                i++;
        }
 
-       if (hcd->spdif)
+       if (hcd->spdif) {
                hcp->daidrv[i] = hdmi_spdif_dai;
+               hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+       }
 
        dev_set_drvdata(dev, hcp);
 
index bfd74b86c9d2f43b8e19d8bd4c211c6d2f3cd887..645aa07941237d13cbebdf0d4bf17130f9dae1ae 100644 (file)
@@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
        SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
                NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
                ARRAY_SIZE(nau8810_mono_mixer_controls)),
-       SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+       SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
                NAU8810_DAC_EN_SFT, 0),
-       SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+       SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
                NAU8810_ADC_EN_SFT, 0),
        SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
                NAU8810_NSPK_EN_SFT, 0, NULL, 0),
index 87ed3dc496dc2de72043c8dbd5c66cbed7c0f923..5ab05e75edeac61219945488a455eafdc8f785f1 100644 (file)
@@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
        SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
                NAU8824_ADCR_EN_SFT, 0),
 
-       SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
-       SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
                NAU8824_DACL_EN_SFT, 0),
@@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_disable_pin(dapm, pin);
+       }
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_force_enable_pin(dapm, pin);
+       }
+}
+
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       snd_soc_dapm_disable_pin(dapm, "SAR");
-       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+       nau8824_dapm_disable_pin(nau8824, "SAR");
+       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
-       snd_soc_dapm_force_enable_pin(dapm, "SAR");
+       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+       nau8824_dapm_enable_pin(nau8824, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               snd_soc_dapm_disable_pin(dapm, "SAR");
-               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+               nau8824_dapm_disable_pin(nau8824, "SAR");
+               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index 9d5acd2d04abd47281b26d8f81331182c0288fb8..86a7fa31c294b2d3fb00494dd3c934976c2a2044 100644 (file)
@@ -910,13 +910,21 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                int jack_insert)
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
        unsigned int val, count;
 
        if (jack_insert) {
-               snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+               usleep_range(15000, 20000);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, RT5682_PWR_CBJ);
+
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -944,8 +952,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
-               snd_soc_dapm_disable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, 0);
 
                rt5682->jack_type = 0;
        }
@@ -1198,7 +1208,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        struct snd_soc_component *component =
                snd_soc_dapm_to_component(w->dapm);
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       int ref, val, reg, sft, mask, idx = -EINVAL;
+       int ref, val, reg, idx = -EINVAL;
        static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
        static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
 
@@ -1212,15 +1222,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 
        idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
 
-       if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+       if (w->shift == RT5682_PWR_ADC_S1F_BIT)
                reg = RT5682_PLL_TRACK_3;
-               sft = RT5682_ADC_OSR_SFT;
-               mask = RT5682_ADC_OSR_MASK;
-       } else {
+       else
                reg = RT5682_PLL_TRACK_2;
-               sft = RT5682_DAC_OSR_SFT;
-               mask = RT5682_DAC_OSR_MASK;
-       }
 
        snd_soc_component_update_bits(component, reg,
                RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
@@ -1232,7 +1237,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        }
 
        snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
-               mask, idx << sft);
+               RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK,
+               (idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT));
 
        return 0;
 }
@@ -1591,8 +1597,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
                rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-       SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
-               rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 
        /* ASRC */
        SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
@@ -1627,9 +1631,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
                0, 0, NULL, 0),
 
-       SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3,
-               RT5682_PWR_CBJ_BIT, 0, NULL, 0),
-
        /* REC Mixer */
        SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix,
                ARRAY_SIZE(rt5682_rec1_l_mix)),
@@ -1792,17 +1793,13 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
 
        /*Vref*/
        {"MICBIAS1", NULL, "Vref1"},
-       {"MICBIAS1", NULL, "Vref2"},
        {"MICBIAS2", NULL, "Vref1"},
-       {"MICBIAS2", NULL, "Vref2"},
 
        {"CLKDET SYS", NULL, "CLKDET"},
 
        {"IN1P", NULL, "LDO2"},
 
        {"BST1 CBJ", NULL, "IN1P"},
-       {"BST1 CBJ", NULL, "CBJ Power"},
-       {"CBJ Power", NULL, "Vref2"},
 
        {"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
        {"RECMIX1L", NULL, "RECMIX1L Power"},
@@ -1912,9 +1909,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
        {"HP Amp", NULL, "Capless"},
        {"HP Amp", NULL, "Charge Pump"},
        {"HP Amp", NULL, "CLKDET SYS"},
-       {"HP Amp", NULL, "CBJ Power"},
        {"HP Amp", NULL, "Vref1"},
-       {"HP Amp", NULL, "Vref2"},
        {"HPOL Playback", "Switch", "HP Amp"},
        {"HPOR Playback", "Switch", "HP Amp"},
        {"HPOL", NULL, "HPOL Playback"},
@@ -2303,16 +2298,13 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
        switch (level) {
        case SND_SOC_BIAS_PREPARE:
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG,
-                       RT5682_PWR_MB | RT5682_PWR_BG);
+                       RT5682_PWR_BG, RT5682_PWR_BG);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO);
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB, RT5682_PWR_MB);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL);
                break;
@@ -2320,7 +2312,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG, 0);
+                       RT5682_PWR_BG, 0);
                break;
 
        default:
@@ -2363,6 +2355,8 @@ static int rt5682_resume(struct snd_soc_component *component)
        regcache_cache_only(rt5682->regmap, false);
        regcache_sync(rt5682->regmap);
 
+       rt5682_irq(0, rt5682);
+
        return 0;
 }
 #else
index 385fa2e9525abe2c89fc624baef239b2541b7328..22c3a6bc0b6c47ae90de8fc435127b089fb80517 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -72,5 +72,5 @@ static struct i2c_driver aic32x4_i2c_driver = {
 module_i2c_driver(aic32x4_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 07d78ae51e05c77bbd0a93410d0df2449be5d759..aa5b7ba0254bc6b7e009ce2adb6099d2d138819c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -74,5 +74,5 @@ static struct spi_driver aic32x4_spi_driver = {
 module_spi_driver(aic32x4_spi_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 96f1526cb258a4e718afb86712aece3608c18003..5520044929f42ff6c51d18fb8338602127c79b91 100644 (file)
@@ -490,6 +490,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
        SND_SOC_DAPM_INPUT("IN2_R"),
        SND_SOC_DAPM_INPUT("IN3_L"),
        SND_SOC_DAPM_INPUT("IN3_R"),
+       SND_SOC_DAPM_INPUT("CM_L"),
+       SND_SOC_DAPM_INPUT("CM_R"),
 };
 
 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
index 283583d1db60555f0831229be3b2f0e1a65bafee..516d17cb2182287f8f739a072fe3128687cecd93 100644 (file)
@@ -1609,7 +1609,6 @@ static int aic3x_probe(struct snd_soc_component *component)
        struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
        int ret, i;
 
-       INIT_LIST_HEAD(&aic3x->list);
        aic3x->component = component;
 
        for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
@@ -1873,6 +1872,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
        if (ret != 0)
                goto err_gpio;
 
+       INIT_LIST_HEAD(&aic3x->list);
        list_add(&aic3x->list, &reset_list);
 
        return 0;
@@ -1889,6 +1889,8 @@ static int aic3x_i2c_remove(struct i2c_client *client)
 {
        struct aic3x_priv *aic3x = i2c_get_clientdata(client);
 
+       list_del(&aic3x->list);
+
        if (gpio_is_valid(aic3x->gpio_reset) &&
            !aic3x_is_shared_reset(aic3x)) {
                gpio_set_value(aic3x->gpio_reset, 0);
index b93fdc8d2d6fb76112eabc4fe2a6a302fa86b710..b0b48eb9c7c91578cb5d588955a0afa74f0ff262 100644 (file)
@@ -2905,6 +2905,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                if (wm_adsp_fw[dsp->fw].num_caps != 0)
                        wm_adsp_buffer_free(dsp);
 
+               dsp->fatal_error = false;
+
                mutex_unlock(&dsp->pwr_lock);
 
                adsp_dbg(dsp, "Execution stopped\n");
@@ -3000,6 +3002,9 @@ static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
 {
        struct wm_adsp_compr_buf *buf = NULL, *tmp;
 
+       if (compr->dsp->fatal_error)
+               return -EINVAL;
+
        list_for_each_entry(tmp, &compr->dsp->buffer_list, list) {
                if (!tmp->name || !strcmp(compr->name, tmp->name)) {
                        buf = tmp;
@@ -3535,11 +3540,11 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
 
        ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
        if (ret < 0) {
-               adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+               compr_err(buf, "Failed to check buffer error: %d\n", ret);
                return ret;
        }
        if (buf->error != 0) {
-               adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+               compr_err(buf, "Buffer error occurred: %d\n", buf->error);
                return -EIO;
        }
 
@@ -3571,8 +3576,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                if (ret < 0)
                        break;
 
-               wm_adsp_buffer_clear(compr->buf);
-
                /* Trigger the IRQ at one fragment of data */
                ret = wm_adsp_buffer_write(compr->buf,
                                           HOST_BUFFER_FIELD(high_water_mark),
@@ -3584,6 +3587,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                }
                break;
        case SNDRV_PCM_TRIGGER_STOP:
+               if (wm_adsp_compr_attached(compr))
+                       wm_adsp_buffer_clear(compr->buf);
                break;
        default:
                ret = -EINVAL;
@@ -3917,22 +3922,40 @@ int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions)
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_lock);
 
+static void wm_adsp_fatal_error(struct wm_adsp *dsp)
+{
+       struct wm_adsp_compr *compr;
+
+       dsp->fatal_error = true;
+
+       list_for_each_entry(compr, &dsp->compr_list, list) {
+               if (compr->stream) {
+                       snd_compr_stop_error(compr->stream,
+                                            SNDRV_PCM_STATE_XRUN);
+                       snd_compr_fragment_elapsed(compr->stream);
+               }
+       }
+}
+
 irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 {
        unsigned int val;
        struct regmap *regmap = dsp->regmap;
        int ret = 0;
 
+       mutex_lock(&dsp->pwr_lock);
+
        ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
        if (ret) {
                adsp_err(dsp,
                        "Failed to read Region Lock Ctrl register: %d\n", ret);
-               return IRQ_HANDLED;
+               goto error;
        }
 
        if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
                adsp_err(dsp, "watchdog timeout error\n");
                wm_adsp_stop_watchdog(dsp);
+               wm_adsp_fatal_error(dsp);
        }
 
        if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
@@ -3946,7 +3969,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Bus Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3959,7 +3982,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Pmem Xmem Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3972,6 +3995,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
        regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
                           ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
 
+error:
+       mutex_unlock(&dsp->pwr_lock);
+
        return IRQ_HANDLED;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
index 59e07ad163296c3ff21f238ccdfca96b6af43c76..8f09b4419a914ae773558c6175529b1acebf9a82 100644 (file)
@@ -85,6 +85,7 @@ struct wm_adsp {
        bool preloaded;
        bool booted;
        bool running;
+       bool fatal_error;
 
        struct list_head ctl_list;
 
index 528e8b108422971eea52655b55642ac5cdbe575d..0b937924d2e47961d697d068edf3944772058baa 100644 (file)
@@ -445,6 +445,19 @@ struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
 }
 EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel);
 
+static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+
+       /* Odd channel number is not valid for older ASRC (channel_bits==3) */
+       if (asrc_priv->channel_bits == 3)
+               snd_pcm_hw_constraint_step(substream->runtime, 0,
+                                          SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+
+       return 0;
+}
+
 static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params,
                                  struct snd_soc_dai *dai)
@@ -539,6 +552,7 @@ static int fsl_asrc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 }
 
 static const struct snd_soc_dai_ops fsl_asrc_dai_ops = {
+       .startup      = fsl_asrc_dai_startup,
        .hw_params    = fsl_asrc_dai_hw_params,
        .hw_free      = fsl_asrc_dai_hw_free,
        .trigger      = fsl_asrc_dai_trigger,
index afe67c865330e39c7b3d1b30bd6127dd764f42aa..3623aa9a6f2ea7838e2c855a5d88681436ac11c1 100644 (file)
@@ -54,6 +54,8 @@ struct fsl_esai {
        u32 fifo_depth;
        u32 slot_width;
        u32 slots;
+       u32 tx_mask;
+       u32 rx_mask;
        u32 hck_rate[2];
        u32 sck_rate[2];
        bool hck_dir[2];
@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
        regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
-
        regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
-
        esai_priv->slot_width = slot_width;
        esai_priv->slots = slots;
+       esai_priv->tx_mask = tx_mask;
+       esai_priv->rx_mask = rx_mask;
 
        return 0;
 }
@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        u8 i, channels = substream->runtime->channels;
        u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+       u32 mask;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
                for (i = 0; tx && i < channels; i++)
                        regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
 
+               /*
+                * When set the TE/RE in the end of enablement flow, there
+                * will be channel swap issue for multi data line case.
+                * In order to workaround this issue, we switch the bit
+                * enablement sequence to below sequence
+                * 1) clear the xSMB & xSMA: which is done in probe and
+                *                           stop state.
+                * 2) set TE/RE
+                * 3) set xSMB
+                * 4) set xSMA:  xSMA is the last one in this flow, which
+                *               will trigger esai to start.
+                */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
                                   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
+               mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
+
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
+
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, 0);
 
                /* Disable and reset FIFO */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
                return ret;
        }
 
+       esai_priv->tx_mask = 0xFFFFFFFF;
+       esai_priv->rx_mask = 0xFFFFFFFF;
+
+       /* Clear the TSMA, TSMB, RSMA, RSMB */
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                              &fsl_esai_dai, 1);
        if (ret) {
index bb12351330e8c0f307b0237ee2e2f1b6b2f9aeec..69bc4848d7876cec544d4ab5067230092953b806 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <sound/simple_card_utils.h>
 
+#define DPCM_SELECTABLE 1
+
 struct graph_priv {
        struct snd_soc_card snd_card;
        struct graph_dai_props {
@@ -440,6 +442,7 @@ static int graph_for_each_link(struct graph_priv *priv,
        struct device_node *codec_port;
        struct device_node *codec_port_old = NULL;
        struct asoc_simple_card_data adata;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        int rc, ret;
 
        /* loop for all listed CPU port */
@@ -470,8 +473,9 @@ static int graph_for_each_link(struct graph_priv *priv,
                         * if Codec port has many endpoints,
                         * or has convert-xxx property
                         */
-                       if ((of_get_child_count(codec_port) > 1) ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           ((of_get_child_count(codec_port) > 1) ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, cpu_ep, codec_ep, li,
                                                (codec_port_old == codec_port));
                        /* else normal sound */
@@ -732,7 +736,8 @@ static int graph_remove(struct platform_device *pdev)
 
 static const struct of_device_id graph_of_match[] = {
        { .compatible = "audio-graph-card", },
-       { .compatible = "audio-graph-scu-card", },
+       { .compatible = "audio-graph-scu-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, graph_of_match);
index 7147bba45a2a61b0830ed49e6057edb3935c9c03..34de32efc4c4defd14c823b904931f50886b9c69 100644 (file)
@@ -9,12 +9,15 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/string.h>
 #include <sound/simple_card.h>
 #include <sound/soc-dai.h>
 #include <sound/soc.h>
 
+#define DPCM_SELECTABLE 1
+
 struct simple_priv {
        struct snd_soc_card snd_card;
        struct simple_dai_props {
@@ -441,6 +444,7 @@ static int simple_for_each_link(struct simple_priv *priv,
        struct device *dev = simple_priv_to_dev(priv);
        struct device_node *top = dev->of_node;
        struct device_node *node;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        bool is_top = 0;
        int ret = 0;
 
@@ -480,8 +484,9 @@ static int simple_for_each_link(struct simple_priv *priv,
                         * if it has many CPUs,
                         * or has convert-xxx property
                         */
-                       if (num > 2 ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           (num > 2 ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, np, codec, li, is_top);
                        /* else normal sound */
                        else
@@ -822,7 +827,8 @@ static int simple_remove(struct platform_device *pdev)
 
 static const struct of_device_id simple_of_match[] = {
        { .compatible = "simple-audio-card", },
-       { .compatible = "simple-scu-audio-card", },
+       { .compatible = "simple-scu-audio-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, simple_of_match);
index 08cea5b5cda9fa9f6f617213c1fba5beef4bd489..0e8b1c5eec888b4c988206a04a3a8e1ca65e14aa 100644 (file)
@@ -706,9 +706,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
        return sst_dsp_init_v2_dpcm(component);
 }
 
+static void sst_soc_remove(struct snd_soc_component *component)
+{
+       struct sst_data *drv = dev_get_drvdata(component->dev);
+
+       drv->soc_card = NULL;
+}
+
 static const struct snd_soc_component_driver sst_soc_platform_drv  = {
        .name           = DRV_NAME,
        .probe          = sst_soc_probe,
+       .remove         = sst_soc_remove,
        .ops            = &sst_platform_ops,
        .compr_ops      = &sst_platform_compr_ops,
        .pcm_new        = sst_pcm_new,
index 3263b0495853c2d57e22cb1a90d6cc7515deb952..c0e0844f75b9fe891fc7352c25e957c800f5e4b6 100644 (file)
@@ -43,6 +43,7 @@ struct cht_mc_private {
        struct clk *mclk;
        struct snd_soc_jack jack;
        bool ts3a227e_present;
+       int quirks;
 };
 
 static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -54,6 +55,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
        struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
        int ret;
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
        if (!codec_dai) {
                dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -223,6 +228,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
                        "jack detection gpios not added, error %d\n", ret);
        }
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        /*
         * The firmware might enable the clock at
         * boot (this information may or may not
@@ -423,16 +432,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        const char *mclk_name;
        struct snd_soc_acpi_mach *mach;
        const char *platform_name;
-       int quirks = 0;
-
-       dmi_id = dmi_first_match(cht_max98090_quirk_table);
-       if (dmi_id)
-               quirks = (unsigned long)dmi_id->driver_data;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
 
+       dmi_id = dmi_first_match(cht_max98090_quirk_table);
+       if (dmi_id)
+               drv->quirks = (unsigned long)dmi_id->driver_data;
+
        drv->ts3a227e_present = acpi_dev_found("104C227E");
        if (!drv->ts3a227e_present) {
                /* no need probe TI jack detection chip */
@@ -458,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-       if (quirks & QUIRK_PMC_PLT_CLK_0)
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
                mclk_name = "pmc_plt_clk_0";
        else
                mclk_name = "pmc_plt_clk_3";
@@ -471,6 +479,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return PTR_ERR(drv->mclk);
        }
 
+       /*
+        * Boards which have the MAX98090's clk connected to clk_0 do not seem
+        * to like it if we muck with the clock. If we disable the clock when
+        * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+        * and the PLL never seems to lock again.
+        * So for these boards we enable it here once and leave it at that.
+        */
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+               ret_val = clk_prepare_enable(drv->mclk);
+               if (ret_val < 0) {
+                       dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+                       return ret_val;
+               }
+       }
+
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
@@ -481,11 +504,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        return ret_val;
 }
 
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               clk_disable_unprepare(ctx->mclk);
+
+       return 0;
+}
+
 static struct platform_driver snd_cht_mc_driver = {
        .driver = {
                .name = "cht-bsw-max98090",
        },
        .probe = snd_cht_mc_probe,
+       .remove = snd_cht_mc_remove,
 };
 
 module_platform_driver(snd_cht_mc_driver)
index 7044d8c2b187375cd6fd44b3342eedf4b37fd8d1..879f14257a3ea4c8fae5eaa683c365e519ecfc17 100644 (file)
@@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
 };
 
 static const unsigned int dmic_2ch[] = {
-       4,
+       2,
 };
 
 static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
index 28c4806b196a2fc3505ce88e298b834cf64e1c56..4bf70b4429f03075b07d877c67f6003d15d5336d 100644 (file)
@@ -483,6 +483,7 @@ static void skl_set_base_module_format(struct skl_sst *ctx,
        base_cfg->audio_fmt.bit_depth = format->bit_depth;
        base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
        base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+       base_cfg->audio_fmt.sample_type = format->sample_type;
 
        dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
                        format->bit_depth, format->valid_bit_depth,
index 1ae83f4ccc3615bfa42e28d08d43f27870526ded..9735e24122514f81d9ee88002fdbdd32ade65e4e 100644 (file)
@@ -181,6 +181,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
        struct hdac_stream *hstream;
        struct hdac_ext_stream *stream;
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        hstream = snd_hdac_get_stream(bus, params->stream,
                                        params->link_dma_id + 1);
@@ -199,10 +200,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
 
        snd_hdac_ext_link_stream_setup(stream, format_val);
 
-       list_for_each_entry(link, &bus->hlink_list, list) {
-               if (link->index == params->link_index)
-                       snd_hdac_ext_link_set_stream_id(link,
-                                       hstream->stream_tag);
+       stream_tag = hstream->stream_tag;
+       if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+               list_for_each_entry(link, &bus->hlink_list, list) {
+                       if (link->index == params->link_index)
+                               snd_hdac_ext_link_set_stream_id(link,
+                                                               stream_tag);
+               }
        }
 
        stream->link_prepared = 1;
@@ -645,6 +649,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        struct hdac_ext_stream *link_dev =
                                snd_soc_dai_get_dma_data(dai, substream);
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -654,7 +659,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        if (!link)
                return -EINVAL;
 
-       snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               stream_tag = hdac_stream(link_dev)->stream_tag;
+               snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+       }
+
        snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
        return 0;
 }
@@ -1453,13 +1462,20 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
        return 0;
 }
 
+static void skl_pcm_remove(struct snd_soc_component *component)
+{
+       /* remove topology */
+       snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
 static const struct snd_soc_component_driver skl_component  = {
        .name           = "pcm",
        .probe          = skl_platform_soc_probe,
+       .remove         = skl_pcm_remove,
        .ops            = &skl_platform_ops,
        .pcm_new        = skl_pcm_new,
        .pcm_free       = skl_pcm_free,
-       .ignore_module_refcount = 1, /* do not increase the refcount in core */
+       .module_get_upon_open = 1, /* increment refcount when a pcm is opened */
 };
 
 int skl_platform_register(struct device *dev)
index 1b8bcdaf02d116cbc124aac6597f3a8a501b468d..9a163d7064d174ff1e4142cb09b31adeaa9c6f59 100644 (file)
@@ -49,6 +49,7 @@ enum bt_sco_state {
        BT_SCO_STATE_IDLE,
        BT_SCO_STATE_RUNNING,
        BT_SCO_STATE_ENDING,
+       BT_SCO_STATE_LOOPBACK,
 };
 
 enum bt_sco_direct {
@@ -486,7 +487,8 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        if (bt->rx->state != BT_SCO_STATE_RUNNING &&
            bt->rx->state != BT_SCO_STATE_ENDING &&
            bt->tx->state != BT_SCO_STATE_RUNNING &&
-           bt->tx->state != BT_SCO_STATE_ENDING) {
+           bt->tx->state != BT_SCO_STATE_ENDING &&
+           bt->tx->state != BT_SCO_STATE_LOOPBACK) {
                dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
                         __func__, bt->rx->state, bt->tx->state);
                goto irq_handler_exit;
@@ -512,6 +514,42 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        buf_cnt_tx = btsco_packet_info[packet_type][2];
        buf_cnt_rx = btsco_packet_info[packet_type][3];
 
+       if (bt->tx->state == BT_SCO_STATE_LOOPBACK) {
+               u8 *src, *dst;
+               unsigned long connsys_addr_rx, ap_addr_rx;
+               unsigned long connsys_addr_tx, ap_addr_tx;
+
+               connsys_addr_rx = *bt->bt_reg_pkt_r;
+               ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_rx & 0xFFFF);
+
+               connsys_addr_tx = *bt->bt_reg_pkt_w;
+               ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_tx & 0xFFFF);
+
+               if (connsys_addr_tx == 0xdeadfeed ||
+                   connsys_addr_rx == 0xdeadfeed) {
+                       /* bt return 0xdeadfeed if read reg during bt sleep */
+                       dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+                                __func__);
+                       goto irq_handler_exit;
+               }
+
+               src = (u8 *)ap_addr_rx;
+               dst = (u8 *)ap_addr_tx;
+
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+                                            bt->tx->temp_packet_buf,
+                                            packet_length,
+                                            packet_num);
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+                                            bt->tx->temp_packet_buf, dst,
+                                            packet_length,
+                                            packet_num);
+               bt->rx->rw_cnt++;
+               bt->tx->rw_cnt++;
+       }
+
        if (bt->rx->state == BT_SCO_STATE_RUNNING ||
            bt->rx->state == BT_SCO_STATE_ENDING) {
                if (bt->rx->xrun) {
@@ -1067,6 +1105,33 @@ static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+static int btcvsd_loopback_get(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+       bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK;
+
+       ucontrol->value.integer.value[0] = lpbk_en;
+       return 0;
+}
+
+static int btcvsd_loopback_set(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+       if (ucontrol->value.integer.value[0]) {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK);
+       } else {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING);
+       }
+       return 0;
+}
+
 static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
@@ -1202,6 +1267,8 @@ static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
        SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
                     btcvsd_band_get, btcvsd_band_set),
+       SOC_SINGLE_BOOL_EXT("BTCVSD Loopback Switch", 0,
+                           btcvsd_loopback_get, btcvsd_loopback_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
                            btcvsd_tx_mute_get, btcvsd_tx_mute_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
index f523ad103acc4cc9ce2bd745a24aa6f5988cc7bf..48e81c5d52fc27959d8b8215a1903764bf302fdd 100644 (file)
@@ -605,6 +605,10 @@ void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
        int m_sel_id = mck_div[mck_id].m_sel_id;
        int div_clk_id = mck_div[mck_id].div_clk_id;
 
+       /* i2s5 mck not support */
+       if (mck_id == MT8183_I2S5_MCK)
+               return;
+
        clk_disable_unprepare(afe_priv->clk[div_clk_id]);
        if (m_sel_id >= 0)
                clk_disable_unprepare(afe_priv->clk[m_sel_id]);
index 400e29edb1c9c4db4d3afbbad2b6a41952100a22..d0b403a0e27b830bc480935fa75df123c3375302 100644 (file)
@@ -24,7 +24,7 @@
 
 #include "rockchip_pdm.h"
 
-#define PDM_DMA_BURST_SIZE     (16) /* size * width: 16*4 = 64 bytes */
+#define PDM_DMA_BURST_SIZE     (8) /* size * width: 8*4 = 32 bytes */
 
 struct rk_pdm_dev {
        struct device *dev;
@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(cpu_dai->dev);
        regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+       pm_runtime_put(cpu_dai->dev);
 
        return 0;
 }
index 4231001226f494da587a7096d27c001802fcdf9c..ab471d550d17adf682d0c5c26607e6de85885791 100644 (file)
@@ -1130,11 +1130,11 @@ static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = {
-       { "Playback Mixer", NULL, "Primary" },
-       { "Playback Mixer", NULL, "Secondary" },
+       { "Playback Mixer", NULL, "Primary Playback" },
+       { "Playback Mixer", NULL, "Secondary Playback" },
 
        { "Mixer DAI TX", NULL, "Playback Mixer" },
-       { "Playback Mixer", NULL, "Mixer DAI RX" },
+       { "Primary Capture", NULL, "Mixer DAI RX" },
 };
 
 static const struct snd_soc_component_driver samsung_i2s_component = {
@@ -1155,7 +1155,8 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
                          int num_dais)
 {
        static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" };
-       static const char *stream_names[] = { "Primary", "Secondary" };
+       static const char *stream_names[] = { "Primary Playback",
+                                             "Secondary Playback" };
        struct snd_soc_dai_driver *dai_drv;
        struct i2s_dai *dai;
        int i;
@@ -1201,6 +1202,7 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
        dai_drv->capture.channels_max = 2;
        dai_drv->capture.rates = i2s_dai_data->pcm_rates;
        dai_drv->capture.formats = SAMSUNG_I2S_FMTS;
+       dai_drv->capture.stream_name = "Primary Capture";
 
        return 0;
 }
index 694512f980fdc207577fcd2a5a59d7a6484bfdd0..1dc54c4206f0adc1ed5250c2d9bf3f44f5c7adf2 100644 (file)
@@ -91,11 +91,11 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
                return ret;
 
        /*
-        *  We add 1 to the rclk_freq value in order to avoid too low clock
+        *  We add 2 to the rclk_freq value in order to avoid too low clock
         *  frequency values due to the EPLL output frequency not being exact
         *  multiple of the audio sampling rate.
         */
-       rclk_freq = params_rate(params) * rfs + 1;
+       rclk_freq = params_rate(params) * rfs + 2;
 
        ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
        if (ret < 0)
index 022996d2db1301d16619ef8ead029a19fce1b75b..4fe83e611c01e0d983e5af5c79594d4c858ec610 100644 (file)
@@ -110,6 +110,8 @@ static const struct of_device_id rsnd_of_match[] = {
        { .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
        { .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 },
        { .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 },
+       /* Special Handling */
+       { .compatible = "renesas,rcar_sound-r8a77990", .data = (void *)(RSND_GEN3 | RSND_SOC_E) },
        {},
 };
 MODULE_DEVICE_TABLE(of, rsnd_of_match);
index 90625c57847b51281c5b38f81f8ab062a70a3785..0e6ef4e1840021d00c94089ca8522c956d4bb813 100644 (file)
@@ -607,6 +607,8 @@ struct rsnd_priv {
 #define RSND_GEN1      (1 << 0)
 #define RSND_GEN2      (2 << 0)
 #define RSND_GEN3      (3 << 0)
+#define RSND_SOC_MASK  (0xFF << 4)
+#define RSND_SOC_E     (1 << 4) /* E1/E2/E3 */
 
        /*
         * below value will be filled on rsnd_gen_probe()
@@ -679,6 +681,9 @@ struct rsnd_priv {
 #define rsnd_is_gen1(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1)
 #define rsnd_is_gen2(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2)
 #define rsnd_is_gen3(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3)
+#define rsnd_is_e3(priv)       (((priv)->flags & \
+                                       (RSND_GEN_MASK | RSND_SOC_MASK)) == \
+                                       (RSND_GEN3 | RSND_SOC_E))
 
 #define rsnd_flags_has(p, f) ((p)->flags & (f))
 #define rsnd_flags_set(p, f) ((p)->flags |= (f))
index db81e066b92ef98902d4433e939b25ab0a635625..585ffba0244b9f568685ed52323d31f7f9fcb3b4 100644 (file)
@@ -14,7 +14,6 @@
  */
 
 #include "rsnd.h"
-#include <linux/sys_soc.h>
 
 #define SRC_NAME "src"
 
@@ -135,7 +134,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
        return rate;
 }
 
-const static u32 bsdsr_table_pattern1[] = {
+static const u32 bsdsr_table_pattern1[] = {
        0x01800000, /* 6 - 1/6 */
        0x01000000, /* 6 - 1/4 */
        0x00c00000, /* 6 - 1/3 */
@@ -144,7 +143,7 @@ const static u32 bsdsr_table_pattern1[] = {
        0x00400000, /* 6 - 1   */
 };
 
-const static u32 bsdsr_table_pattern2[] = {
+static const u32 bsdsr_table_pattern2[] = {
        0x02400000, /* 6 - 1/6 */
        0x01800000, /* 6 - 1/4 */
        0x01200000, /* 6 - 1/3 */
@@ -153,7 +152,7 @@ const static u32 bsdsr_table_pattern2[] = {
        0x00600000, /* 6 - 1   */
 };
 
-const static u32 bsisr_table[] = {
+static const u32 bsisr_table[] = {
        0x00100060, /* 6 - 1/6 */
        0x00100040, /* 6 - 1/4 */
        0x00100030, /* 6 - 1/3 */
@@ -162,7 +161,7 @@ const static u32 bsisr_table[] = {
        0x00100020, /* 6 - 1   */
 };
 
-const static u32 chan288888[] = {
+static const u32 chan288888[] = {
        0x00000006, /* 1 to 2 */
        0x000001fe, /* 1 to 8 */
        0x000001fe, /* 1 to 8 */
@@ -171,7 +170,7 @@ const static u32 chan288888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan244888[] = {
+static const u32 chan244888[] = {
        0x00000006, /* 1 to 2 */
        0x0000001e, /* 1 to 4 */
        0x0000001e, /* 1 to 4 */
@@ -180,7 +179,7 @@ const static u32 chan244888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan222222[] = {
+static const u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
@@ -189,18 +188,12 @@ const static u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
 };
 
-static const struct soc_device_attribute ov_soc[] = {
-       { .soc_id = "r8a77990" }, /* E3 */
-       { /* sentinel */ }
-};
-
 static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                                      struct rsnd_mod *mod)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       const struct soc_device_attribute *soc = soc_device_match(ov_soc);
        int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
@@ -307,7 +300,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        /*
         * E3 need to overwrite
         */
-       if (soc)
+       if (rsnd_is_e3(priv))
                switch (rsnd_mod_id(mod)) {
                case 0:
                case 4:
index 93d316d5bf8e3cac63d9955cfa9e0a6b7bbbb9d0..46e3ab0fced47342be93c32f6509a79829967281 100644 (file)
@@ -947,7 +947,7 @@ static void soc_cleanup_component(struct snd_soc_component *component)
        snd_soc_dapm_free(snd_soc_component_get_dapm(component));
        soc_cleanup_component_debugfs(component);
        component->card = NULL;
-       if (!component->driver->ignore_module_refcount)
+       if (!component->driver->module_get_upon_open)
                module_put(component->dev->driver->owner);
 }
 
@@ -1381,7 +1381,7 @@ static int soc_probe_component(struct snd_soc_card *card,
                return 0;
        }
 
-       if (!component->driver->ignore_module_refcount &&
+       if (!component->driver->module_get_upon_open &&
            !try_module_get(component->dev->driver->owner))
                return -ENODEV;
 
@@ -2797,6 +2797,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
 
                ret = soc_init_dai_link(card, link);
                if (ret) {
+                       soc_cleanup_platform(card);
                        dev_err(card->dev, "ASoC: failed to init link %s\n",
                                link->name);
                        mutex_unlock(&client_mutex);
@@ -2819,6 +2820,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
        card->instantiated = 0;
        mutex_init(&card->mutex);
        mutex_init(&card->dapm_mutex);
+       spin_lock_init(&card->dpcm_lock);
 
        return snd_soc_bind_card(card);
 }
index 1ec06ef6d161606922b1a3f8d16e2e685129db62..0382a47b30bd8182d40340c839268233cd47e21b 100644 (file)
@@ -3650,6 +3650,13 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_dac:
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_pga:
+       case snd_soc_dapm_buffer:
+       case snd_soc_dapm_scheduler:
+       case snd_soc_dapm_effect:
+       case snd_soc_dapm_src:
+       case snd_soc_dapm_asrc:
+       case snd_soc_dapm_encoder:
+       case snd_soc_dapm_decoder:
        case snd_soc_dapm_out_drv:
        case snd_soc_dapm_micbias:
        case snd_soc_dapm_line:
@@ -3957,6 +3964,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
        int count;
 
        devm_kfree(card->dev, (void *)*private_value);
+
+       if (!w_param_text)
+               return;
+
        for (count = 0 ; count < num_params; count++)
                devm_kfree(card->dev, (void *)w_param_text[count]);
        devm_kfree(card->dev, w_param_text);
index 0d5ec68a1e50869e00ea6abb13b229c79b903329..be80a12fba27cc381b0438a95bb4987c6843cb39 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/export.h>
@@ -463,6 +464,9 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
                        continue;
 
                component->driver->ops->close(substream);
+
+               if (component->driver->module_get_upon_open)
+                       module_put(component->dev->driver->owner);
        }
 
        return 0;
@@ -513,6 +517,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
                    !component->driver->ops->open)
                        continue;
 
+               if (component->driver->module_get_upon_open &&
+                   !try_module_get(component->dev->driver->owner)) {
+                       ret = -ENODEV;
+                       goto module_err;
+               }
+
                ret = component->driver->ops->open(substream);
                if (ret < 0) {
                        dev_err(component->dev,
@@ -628,7 +638,7 @@ codec_dai_err:
 
 component_err:
        soc_pcm_components_close(substream, component);
-
+module_err:
        if (cpu_dai->driver->ops->shutdown)
                cpu_dai->driver->ops->shutdown(substream, cpu_dai);
 out:
@@ -954,10 +964,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
                codec_params = *params;
 
                /* fixup params based on TDM slot masks */
-               if (codec_dai->tx_mask)
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+                   codec_dai->tx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->tx_mask);
-               if (codec_dai->rx_mask)
+
+               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+                   codec_dai->rx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->rx_mask);
 
@@ -1213,6 +1226,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1228,8 +1242,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
        dpcm->fe = fe;
        be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
        dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
        list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
                        stream ? "capture" : "playback",  fe->dai_link->name,
@@ -1275,6 +1291,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
 void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm, *d;
+       unsigned long flags;
 
        for_each_dpcm_be_safe(fe, stream, dpcm, d) {
                dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
@@ -1294,8 +1311,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 #ifdef CONFIG_DEBUG_FS
                debugfs_remove(dpcm->debugfs_state);
 #endif
+               spin_lock_irqsave(&fe->card->dpcm_lock, flags);
                list_del(&dpcm->list_be);
                list_del(&dpcm->list_fe);
+               spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
                kfree(dpcm);
        }
 }
@@ -1547,10 +1566,13 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
 void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm)
                dpcm->be->dpcm[stream].runtime_update =
                                                SND_SOC_DPCM_UPDATE_NO;
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 }
 
 static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
@@ -1899,10 +1921,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
                struct snd_soc_pcm_runtime *be = dpcm->be;
                struct snd_pcm_substream *be_substream =
                        snd_soc_dpcm_get_substream(be, stream);
-               struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+               struct snd_soc_pcm_runtime *rtd;
                struct snd_soc_dai *codec_dai;
                int i;
 
+               /* A backend may not have the requested substream */
+               if (!be_substream)
+                       continue;
+
+               rtd = be_substream->private_data;
                if (rtd->dai_link->be_hw_params_fixup)
                        continue;
 
@@ -2571,6 +2598,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
        struct snd_soc_dpcm *dpcm;
        enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
        int ret;
+       unsigned long flags;
 
        dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
                        stream ? "capture" : "playback", fe->dai_link->name);
@@ -2640,11 +2668,13 @@ close:
        dpcm_be_dai_shutdown(fe, stream);
 disconnect:
        /* disconnect any non started BEs */
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        return ret;
 }
@@ -3221,7 +3251,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3230,12 +3263,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
                state = dpcm->fe->dpcm[stream].state;
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
-                       state == SND_SOC_DPCM_STATE_SUSPEND)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_SUSPEND) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to free/stop this BE DAI */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
 
@@ -3248,7 +3284,10 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3258,12 +3297,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
                        state == SND_SOC_DPCM_STATE_SUSPEND ||
-                       state == SND_SOC_DPCM_STATE_PREPARE)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_PREPARE) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to change hw_params */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
 
@@ -3302,6 +3344,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
        struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
        struct snd_soc_dpcm *dpcm;
        ssize_t offset = 0;
+       unsigned long flags;
 
        /* FE state */
        offset += snprintf(buf + offset, size - offset,
@@ -3329,6 +3372,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                goto out;
        }
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                params = &dpcm->hw_params;
@@ -3349,7 +3393,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                                params_channels(params),
                                params_rate(params));
        }
-
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 out:
        return offset;
 }
index 25fca7055464a894e5e260bf872de8c410136f56..96852d25061936e1f72b9c929686a1a740bdc6a8 100644 (file)
@@ -482,10 +482,11 @@ static void remove_widget(struct snd_soc_component *comp,
 
                        snd_ctl_remove(card, kcontrol);
 
-                       kfree(dobj->control.dvalues);
+                       /* free enum kcontrol's dvalues and dtexts */
+                       kfree(se->dobj.control.dvalues);
                        for (j = 0; j < se->items; j++)
-                               kfree(dobj->control.dtexts[j]);
-                       kfree(dobj->control.dtexts);
+                               kfree(se->dobj.control.dtexts[j]);
+                       kfree(se->dobj.control.dtexts);
 
                        kfree(se);
                        kfree(w->kcontrol_news[i].name);
index 47901983a6ff88706dd847376a42d5aeb2c3a4c6..78bed97347136974d3da6b8a09d3465eaafffbdd 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -37,6 +38,8 @@ struct stm32_adfsdm_priv {
        /* PCM buffer */
        unsigned char *pcm_buff;
        unsigned int pos;
+
+       struct mutex lock; /* protect against race condition on iio state */
 };
 
 static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
@@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
 {
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
 
+       mutex_lock(&priv->lock);
        if (priv->iio_active) {
                iio_channel_stop_all_cb(priv->iio_cb);
                priv->iio_active = false;
        }
+       mutex_unlock(&priv->lock);
 }
 
 static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
@@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
        int ret;
 
+       mutex_lock(&priv->lock);
+       if (priv->iio_active) {
+               iio_channel_stop_all_cb(priv->iio_cb);
+               priv->iio_active = false;
+       }
+
        ret = iio_write_channel_attribute(priv->iio_ch,
                                          substream->runtime->rate, 0,
                                          IIO_CHAN_INFO_SAMP_FREQ);
        if (ret < 0) {
                dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
                        __func__, substream->runtime->rate);
-               return ret;
+               goto out;
        }
 
        if (!priv->iio_active) {
@@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
                                __func__, ret);
        }
 
+out:
+       mutex_unlock(&priv->lock);
+
        return ret;
 }
 
@@ -291,6 +305,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
 static int stm32_adfsdm_probe(struct platform_device *pdev)
 {
        struct stm32_adfsdm_priv *priv;
+       struct snd_soc_component *component;
        int ret;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -299,6 +314,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 
        priv->dev = &pdev->dev;
        priv->dai_drv = stm32_adfsdm_dai;
+       mutex_init(&priv->lock);
 
        dev_set_drvdata(&pdev->dev, priv);
 
@@ -317,9 +333,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        if (IS_ERR(priv->iio_cb))
                return PTR_ERR(priv->iio_cb);
 
-       ret = devm_snd_soc_register_component(&pdev->dev,
-                                             &stm32_adfsdm_soc_platform,
-                                             NULL, 0);
+       component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
+       if (!component)
+               return -ENOMEM;
+#ifdef CONFIG_DEBUG_FS
+       component->debugfs_prefix = "pcm";
+#endif
+
+       ret = snd_soc_add_component(&pdev->dev, component,
+                                   &stm32_adfsdm_soc_platform, NULL, 0);
        if (ret < 0)
                dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
                        __func__);
@@ -327,12 +349,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int stm32_adfsdm_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_component(&pdev->dev);
+
+       return 0;
+}
+
 static struct platform_driver stm32_adfsdm_driver = {
        .driver = {
                   .name = STM32_ADFSDM_DRV_NAME,
                   .of_match_table = stm32_adfsdm_of_match,
                   },
        .probe = stm32_adfsdm_probe,
+       .remove = stm32_adfsdm_remove,
 };
 
 module_platform_driver(stm32_adfsdm_driver);
index 47c334de6b0966a4e5fd4a589e8ce6fd757c1fce..8968458eec62d6b796e2eaae3df508e03c375e6d 100644 (file)
@@ -281,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
        case STM32_I2S_CFG2_REG:
        case STM32_I2S_IER_REG:
        case STM32_I2S_SR_REG:
-       case STM32_I2S_TXDR_REG:
        case STM32_I2S_RXDR_REG:
        case STM32_I2S_CGFR_REG:
                return true;
@@ -293,7 +292,7 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
 static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case STM32_I2S_TXDR_REG:
+       case STM32_I2S_SR_REG:
        case STM32_I2S_RXDR_REG:
                return true;
        default:
index 14c9591aae4260d94f6d19231aaa56ad436e5469..d68d62f12df56098214a94de25758598c9f7502f 100644 (file)
@@ -105,6 +105,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
        if (!pdev) {
                dev_err(&sai_client->pdev->dev,
                        "Device not found for node %pOFn\n", np_provider);
+               of_node_put(np_provider);
                return -ENODEV;
        }
 
@@ -113,19 +114,20 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
                dev_err(&sai_client->pdev->dev,
                        "SAI sync provider data not found\n");
                ret = -EINVAL;
-               goto out_put_dev;
+               goto error;
        }
 
        /* Configure sync client */
        ret = stm32_sai_sync_conf_client(sai_client, synci);
        if (ret < 0)
-               goto out_put_dev;
+               goto error;
 
        /* Configure sync provider */
        ret = stm32_sai_sync_conf_provider(sai_provider, synco);
 
-out_put_dev:
+error:
        put_device(&pdev->dev);
+       of_node_put(np_provider);
        return ret;
 }
 
index f9297228c41ce4412f86d9ba7010ba714d44d345..d7045aa520de56eb42d108a14e92aee810d15f08 100644 (file)
@@ -70,6 +70,7 @@
 #define SAI_IEC60958_STATUS_BYTES      24
 
 #define SAI_MCLK_NAME_LEN              32
+#define SAI_RATE_11K                   11025
 
 /**
  * struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
  * @slot_mask: rx or tx active slots mask. set at init or at runtime
  * @data_size: PCM data width. corresponds to PCM substream width.
  * @spdif_frm_cnt: S/PDIF playback frame counter
- * @snd_aes_iec958: iec958 data
+ * @iec958: iec958 data
  * @ctrl_lock: control lock
+ * @irq_lock: prevent race condition with IRQ
  */
 struct stm32_sai_sub_data {
        struct platform_device *pdev;
@@ -133,6 +135,7 @@ struct stm32_sai_sub_data {
        unsigned int spdif_frm_cnt;
        struct snd_aes_iec958 iec958;
        struct mutex ctrl_lock; /* protect resources accessed by controls */
+       spinlock_t irq_lock; /* used to prevent race condition with IRQ */
 };
 
 enum stm32_sai_fifo_th {
@@ -307,6 +310,25 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
        return ret;
 }
 
+static int stm32_sai_set_parent_clock(struct stm32_sai_sub_data *sai,
+                                     unsigned int rate)
+{
+       struct platform_device *pdev = sai->pdev;
+       struct clk *parent_clk = sai->pdata->clk_x8k;
+       int ret;
+
+       if (!(rate % SAI_RATE_11K))
+               parent_clk = sai->pdata->clk_x11k;
+
+       ret = clk_set_parent(sai->sai_ck, parent_clk);
+       if (ret)
+               dev_err(&pdev->dev, " Error %d setting sai_ck parent clock. %s",
+                       ret, ret == -EBUSY ?
+                       "Active stream rates conflict\n" : "\n");
+
+       return ret;
+}
+
 static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                      unsigned long *prate)
 {
@@ -474,8 +496,10 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
                status = SNDRV_PCM_STATE_XRUN;
        }
 
-       if (status != SNDRV_PCM_STATE_RUNNING)
+       spin_lock(&sai->irq_lock);
+       if (status != SNDRV_PCM_STATE_RUNNING && sai->substream)
                snd_pcm_stop_xrun(sai->substream);
+       spin_unlock(&sai->irq_lock);
 
        return IRQ_HANDLED;
 }
@@ -486,25 +510,29 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int ret;
 
-       if (dir == SND_SOC_CLOCK_OUT) {
+       if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
                                         (unsigned int)~SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
-               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
-               sai->mclk_rate = freq;
+               /* If master clock is used, set parent clock now */
+               ret = stm32_sai_set_parent_clock(sai, freq);
+               if (ret)
+                       return ret;
 
-               if (sai->sai_mclk) {
-                       ret = clk_set_rate_exclusive(sai->sai_mclk,
-                                                    sai->mclk_rate);
-                       if (ret) {
-                               dev_err(cpu_dai->dev,
-                                       "Could not set mclk rate\n");
-                               return ret;
-                       }
+               ret = clk_set_rate_exclusive(sai->sai_mclk, freq);
+               if (ret) {
+                       dev_err(cpu_dai->dev,
+                               ret == -EBUSY ?
+                               "Active streams have incompatible rates" :
+                               "Could not set mclk rate\n");
+                       return ret;
                }
+
+               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+               sai->mclk_rate = freq;
        }
 
        return 0;
@@ -679,8 +707,19 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int imr, cr2, ret;
+       unsigned long flags;
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = substream;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
+
+       if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+               snd_pcm_hw_constraint_mask64(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_FORMAT,
+                                            SNDRV_PCM_FMTBIT_S32_LE);
+               snd_pcm_hw_constraint_single(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+       }
 
        ret = clk_prepare_enable(sai->sai_ck);
        if (ret < 0) {
@@ -898,14 +937,16 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                                     struct snd_pcm_hw_params *params)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
-       int div = 0;
+       int div = 0, cr1 = 0;
        int sai_clk_rate, mclk_ratio, den;
        unsigned int rate = params_rate(params);
+       int ret;
 
-       if (!(rate % 11025))
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
-       else
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
+       if (!sai->sai_mclk) {
+               ret = stm32_sai_set_parent_clock(sai, rate);
+               if (ret)
+                       return ret;
+       }
        sai_clk_rate = clk_get_rate(sai->sai_ck);
 
        if (STM_SAI_IS_F4(sai->pdata)) {
@@ -943,13 +984,19 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                } else {
                        if (sai->mclk_rate) {
                                mclk_ratio = sai->mclk_rate / rate;
-                               if ((mclk_ratio != 512) &&
-                                   (mclk_ratio != 256)) {
+                               if (mclk_ratio == 512) {
+                                       cr1 = SAI_XCR1_OSR;
+                               } else if (mclk_ratio != 256) {
                                        dev_err(cpu_dai->dev,
                                                "Wrong mclk ratio %d\n",
                                                mclk_ratio);
                                        return -EINVAL;
                                }
+
+                               regmap_update_bits(sai->regmap,
+                                                  STM_SAI_CR1_REGX,
+                                                  SAI_XCR1_OSR, cr1);
+
                                div = stm32_sai_get_clk_div(sai, sai_clk_rate,
                                                            sai->mclk_rate);
                                if (div < 0)
@@ -1051,28 +1098,36 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
        regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
                           SAI_XCR1_NODIV);
 
-       clk_disable_unprepare(sai->sai_ck);
+       /* Release mclk rate only if rate was actually set */
+       if (sai->mclk_rate) {
+               clk_rate_exclusive_put(sai->sai_mclk);
+               sai->mclk_rate = 0;
+       }
 
-       clk_rate_exclusive_put(sai->sai_mclk);
+       clk_disable_unprepare(sai->sai_ck);
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = NULL;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
 }
 
 static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
                             struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+       struct snd_kcontrol_new knew = iec958_ctls;
 
        if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
                dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__);
-               return snd_ctl_add(rtd->pcm->card,
-                                  snd_ctl_new1(&iec958_ctls, sai));
+               knew.device = rtd->pcm->device;
+               return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai));
        }
 
        return 0;
@@ -1081,7 +1136,7 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
 static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
-       int cr1 = 0, cr1_mask;
+       int cr1 = 0, cr1_mask, ret;
 
        sai->cpu_dai = cpu_dai;
 
@@ -1111,8 +1166,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
        /* Configure synchronization */
        if (sai->sync == SAI_SYNC_EXTERNAL) {
                /* Configure synchro client and provider */
-               sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
-                                    sai->synco, sai->synci);
+               ret = sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
+                                          sai->synco, sai->synci);
+               if (ret)
+                       return ret;
        }
 
        cr1_mask |= SAI_XCR1_SYNCEN_MASK;
@@ -1392,7 +1449,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
        if (!sai->cpu_dai_drv)
                return -ENOMEM;
 
-       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
        if (STM_SAI_IS_PLAYBACK(sai)) {
                memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
                       sizeof(stm32_sai_playback_dai));
@@ -1402,6 +1458,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
                       sizeof(stm32_sai_capture_dai));
                sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
        }
+       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
 
        return 0;
 }
@@ -1424,6 +1481,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
 
        sai->pdev = pdev;
        mutex_init(&sai->ctrl_lock);
+       spin_lock_init(&sai->irq_lock);
        platform_set_drvdata(pdev, sai);
 
        sai->pdata = dev_get_drvdata(pdev->dev.parent);
index a7f413cb704dc7154c42c5adf29ed242d632cfc2..b14ab512c2ce0d4ef2aceae5d673e6b528e1120c 100644 (file)
@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
 {
        int i;
 
-       stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
+       stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
        if (!stream->buffer)
                return -ENOMEM;
 
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 512306a37531d829b880ebbe841aa87c1949b28f..0f257139b003e117eee62b82c87e2f66a20f8e36 100644 (file)
 #include "liburing.h"
 #include "barrier.h"
 
-#ifndef IOCQE_FLAG_CACHEHIT
-#define IOCQE_FLAG_CACHEHIT    (1U << 0)
-#endif
-
 #define min(a, b)              ((a < b) ? (a) : (b))
 
 struct io_sq_ring {
@@ -85,7 +81,6 @@ struct submitter {
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
-       unsigned long cachehit, cachemiss;
        volatile int finish;
 
        __s32 *fds;
@@ -270,10 +265,6 @@ static int reap_events(struct submitter *s)
                                return -1;
                        }
                }
-               if (cqe->flags & IOCQE_FLAG_CACHEHIT)
-                       s->cachehit++;
-               else
-                       s->cachemiss++;
                reaped++;
                head++;
        } while (1);
@@ -489,7 +480,7 @@ static void file_depths(char *buf)
 int main(int argc, char *argv[])
 {
        struct submitter *s = &submitters[0];
-       unsigned long done, calls, reap, cache_hit, cache_miss;
+       unsigned long done, calls, reap;
        int err, i, flags, fd;
        char *fdepths;
        void *ret;
@@ -569,44 +560,29 @@ int main(int argc, char *argv[])
        pthread_create(&s->thread, NULL, submitter_fn, s);
 
        fdepths = malloc(8 * s->nr_files);
-       cache_hit = cache_miss = reap = calls = done = 0;
+       reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
-               unsigned long this_cache_hit = 0;
-               unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
-               double hit = 0.0;
 
                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
-               this_cache_hit += s->cachehit;
-               this_cache_miss += s->cachemiss;
-               if (this_cache_hit && this_cache_miss) {
-                       unsigned long hits, total;
-
-                       hits = this_cache_hit - cache_hit;
-                       total = hits + this_cache_miss - cache_miss;
-                       hit = (double) hits / (double) total;
-                       hit *= 100.0;
-               }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                } else
                        rpc = ipc = -1;
                file_depths(fdepths);
-               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
                                this_done - done, rpc, ipc, s->inflight,
-                               fdepths, hit);
+                               fdepths);
                done = this_done;
                calls = this_call;
                reap = this_reap;
-               cache_hit = s->cachehit;
-               cache_miss = s->cachemiss;
        } while (!finish);
 
        pthread_join(s->thread, &ret);
index 5bf8e52c41fcaf2bb38127d4bb076a9164539ddb..8e7c56e9590fbb0dfa2d76780696206b166ec440 100644 (file)
@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
 $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
        $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
        @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
@@ -220,8 +220,9 @@ install_lib: all_cmd
 install_headers:
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
-               $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
-               $(call do_install,btf.h,$(prefix)/include/bpf,644);
+               $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,xsk.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
index 87e3020ac1bc8b3772d98ce58751fc2d6f979184..cf119c9b6f2700e790ec02e16b524f6b3a4cf581 100644 (file)
@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
+       if (cand_kind != canon_kind)
+               return 0;
+
        switch (cand_kind) {
        case BTF_KIND_INT:
                return btf_equal_int(cand_type, canon_type);
index 87494c7c619d85dd4199a31b7de4a2739ad678b8..981c6ce2da2c76cee9d6a3b96b37bf6fa5cdaa33 100644 (file)
@@ -2233,7 +2233,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
                return val & 0xffffffff;
 
        if (strcmp(type, "u64") == 0 ||
-           strcmp(type, "s64"))
+           strcmp(type, "s64") == 0)
                return val;
 
        if (strcmp(type, "s8") == 0)
index 5dde107083c60bc8dd12538a2838de73c8b1470c..479196aeb4096efb0f0c03f6722cefcb1765326e 100644 (file)
@@ -165,6 +165,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "fortify_panic",
                "usercopy_abort",
                "machine_real_restart",
+               "rewind_stack_do_exit",
        };
 
        if (func->bind == STB_WEAK)
index 49ee3c2033ecbd8df8408445f141c8312f7efc44..c3625ec374e0658e66a3ed7beadbcbe19b5929c0 100644 (file)
@@ -1308,6 +1308,7 @@ static void init_features(struct perf_session *session)
        for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
                perf_header__set_feat(&session->header, feat);
 
+       perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
        perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
        perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
        perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
index 1999d6533d12a35e672e4caf8c98e24010191bcf..fbbb0da43abbad579f354ac909a6ffccec14b5ce 100644 (file)
@@ -1377,6 +1377,7 @@ int cmd_top(int argc, const char **argv)
                         * */
                        .overwrite      = 0,
                        .sample_time    = true,
+                       .sample_time_set = true,
                },
                .max_stack           = sysctl__max_stack(),
                .annotation_opts     = annotation__default_options,
index 3b71902a5a21124c6006100580245ceda1c72a23..bf271fbc3a885f509d78ee60fbb0138fd12df53f 100644 (file)
@@ -331,7 +331,7 @@ if perf_db_export_calls:
                        'return_id,'
                        'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
                        'parent_call_path_id,'
-                       'parent_id'
+                       'calls.parent_id'
                ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
 
 do_query(query, 'CREATE VIEW samples_view AS '
index c6351b557bb0a9afb70d2ed4330c3496c3266a35..9494f9dc61ecac041e114ffa2553e287f416e30d 100644 (file)
@@ -57,9 +57,11 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                else if (prog_id > node->info_linear->info.id)
                        n = n->rb_right;
                else
-                       break;
+                       goto out;
        }
+       node = NULL;
 
+out:
        up_read(&env->bpf_progs.lock);
        return node;
 }
@@ -109,10 +111,12 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
-                       break;
+                       goto out;
        }
+       node = NULL;
 
        up_read(&env->bpf_progs.lock);
+out:
        return node;
 }
 
index 6689378ee577c18ca1efac4b95f84c5c45d40404..51ead577533fa6f6ee8c8cc49b8fc084490f0ef1 100644 (file)
@@ -1868,12 +1868,12 @@ static void *perf_evlist__poll_thread(void *arg)
 {
        struct perf_evlist *evlist = arg;
        bool draining = false;
-       int i;
+       int i, done = 0;
+
+       while (!done) {
+               bool got_data = false;
 
-       while (draining || !(evlist->thread.done)) {
-               if (draining)
-                       draining = false;
-               else if (evlist->thread.done)
+               if (evlist->thread.done)
                        draining = true;
 
                if (!draining)
@@ -1894,9 +1894,13 @@ static void *perf_evlist__poll_thread(void *arg)
                                        pr_warning("cannot locate proper evsel for the side band event\n");
 
                                perf_mmap__consume(map);
+                               got_data = true;
                        }
                        perf_mmap__read_done(map);
                }
+
+               if (draining && !got_data)
+                       break;
        }
        return NULL;
 }
index 66d066f18b5b2de290a3cc4c4a1c1caa29baaeb8..966360844fffbd10b4d97c318b22e0fc8446c1ee 100644 (file)
@@ -2368,7 +2368,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->user_regs.abi) {
                        u64 mask = evsel->attr.sample_regs_user;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_regs.mask = mask;
                        data->user_regs.regs = (u64 *)array;
@@ -2424,7 +2424,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
                        u64 mask = evsel->attr.sample_regs_intr;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->intr_regs.mask = mask;
                        data->intr_regs.regs = (u64 *)array;
@@ -2552,7 +2552,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2580,7 +2580,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2710,7 +2710,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        *array++ = sample->user_regs.abi;
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        memcpy(array, sample->user_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
@@ -2746,7 +2746,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        *array++ = sample->intr_regs.abi;
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        memcpy(array, sample->intr_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
index b9e693825873a8459c055a62cfcf1aefb289c99a..2d2af2ac2b1e976041b5e05eca4374c6b424b8fa 100644 (file)
@@ -2606,6 +2606,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                perf_env__insert_bpf_prog_info(env, info_node);
        }
 
+       up_write(&env->bpf_progs.lock);
        return 0;
 out:
        free(info_linear);
@@ -2623,7 +2624,9 @@ static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data _
 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
 {
        struct perf_env *env = &ff->ph->env;
+       struct btf_node *node = NULL;
        u32 count, i;
+       int err = -1;
 
        if (ff->ph->needs_swap) {
                pr_warning("interpreting btf from systems with endianity is not yet supported\n");
@@ -2636,31 +2639,32 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
        down_write(&env->bpf_progs.lock);
 
        for (i = 0; i < count; ++i) {
-               struct btf_node *node;
                u32 id, data_size;
 
                if (do_read_u32(ff, &id))
-                       return -1;
+                       goto out;
                if (do_read_u32(ff, &data_size))
-                       return -1;
+                       goto out;
 
                node = malloc(sizeof(struct btf_node) + data_size);
                if (!node)
-                       return -1;
+                       goto out;
 
                node->id = id;
                node->data_size = data_size;
 
-               if (__do_read(ff, node->data, data_size)) {
-                       free(node);
-                       return -1;
-               }
+               if (__do_read(ff, node->data, data_size))
+                       goto out;
 
                perf_env__insert_btf(env, node);
+               node = NULL;
        }
 
+       err = 0;
+out:
        up_write(&env->bpf_progs.lock);
-       return 0;
+       free(node);
+       return err;
 }
 
 struct feature_ops {
index e32628cd20a7f36e0e06efb3ee5c2e6cae09dabb..ee71efb9db62e676c8fab2a60c34d079ba7e7e7e 100644 (file)
@@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map)
        return kmap && kmap->name[0];
 }
 
+bool __map__is_bpf_prog(const struct map *map)
+{
+       const char *name;
+
+       if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+               return true;
+
+       /*
+        * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
+        * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
+        * guess the type based on name.
+        */
+       name = map->dso->short_name;
+       return name && (strstr(name, "bpf_prog_") == name);
+}
+
 bool map__has_symbols(const struct map *map)
 {
        return dso__has_symbols(map->dso);
@@ -910,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map)
                rc = strcmp(m->dso->short_name, map->dso->short_name);
                if (rc < 0)
                        p = &(*p)->rb_left;
-               else if (rc  > 0)
-                       p = &(*p)->rb_right;
                else
-                       return;
+                       p = &(*p)->rb_right;
        }
        rb_link_node(&map->rb_node_name, parent, p);
        rb_insert_color(&map->rb_node_name, &maps->names);
index 0e20749f2c55d533842171dd2b2a7262f02768d1..dc93787c74f01b65fa7fcc76388a57472db707e7 100644 (file)
@@ -159,10 +159,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
 
 bool __map__is_kernel(const struct map *map);
 bool __map__is_extra_kernel_map(const struct map *map);
+bool __map__is_bpf_prog(const struct map *map);
 
 static inline bool __map__is_kmodule(const struct map *map)
 {
-       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
+              !__map__is_bpf_prog(map);
 }
 
 bool map__has_symbols(const struct map *map);
index c3fad065c89c085b39da83de4a751041a99ae3d6..c7727be9719f4ea9b9524ddf1ff92daeebab8982 100644 (file)
@@ -44,6 +44,7 @@
 #include <cpuid.h>
 #include <linux/capability.h>
 #include <errno.h>
+#include <math.h>
 
 char *proc_stat = "/proc/stat";
 FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
 unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
 
 #define RAPL_CORES_ENERGY_STATUS       (1 << 9)
                                        /* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY   (1 << 10)
+                                       /* Indicates cores energy collection is per-core,
+                                        * not per-package. */
+#define RAPL_AMD_F17H          (1 << 11)
+                                       /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+                                       /* 0xc001029a MSR_CORE_ENERGY_STAT */
+                                       /* 0xc001029b MSR_PKG_ENERGY_STAT */
 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
 #define        TJMAX_DEFAULT   100
 
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT      0xc0010299
+#define MSR_CORE_ENERGY_STAT   0xc001029a
+#define MSR_PKG_ENERGY_STAT    0xc001029b
+
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 /*
@@ -187,6 +199,7 @@ struct core_data {
        unsigned long long c7;
        unsigned long long mc6_us;      /* duplicate as per-core for now, even though per module */
        unsigned int core_temp_c;
+       unsigned int core_energy;       /* MSR_CORE_ENERGY_STAT */
        unsigned int core_id;
        unsigned long long counter[MAX_ADDED_COUNTERS];
 } *core_even, *core_odd;
@@ -273,6 +286,7 @@ struct system_summary {
 
 struct cpu_topology {
        int physical_package_id;
+       int die_id;
        int logical_cpu_id;
        int physical_node_id;
        int logical_node_id;    /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
 
 struct topo_params {
        int num_packages;
+       int num_die;
        int num_cpus;
        int num_cores;
        int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
        int retval, pkg_no, core_no, thread_no, node_no;
 
        for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
-               for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
-                       for (node_no = 0; node_no < topo.nodes_per_pkg;
-                            node_no++) {
+               for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+                       for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
                                for (thread_no = 0; thread_no <
                                        topo.threads_per_core; ++thread_no) {
                                        struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
        { 0x0, "CPU" },
        { 0x0, "APIC" },
        { 0x0, "X2APIC" },
+       { 0x0, "Die" },
 };
 
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
 #define        BIC_CPU         (1ULL << 47)
 #define        BIC_APIC        (1ULL << 48)
 #define        BIC_X2APIC      (1ULL << 49)
+#define        BIC_Die         (1ULL << 50)
 
 #define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -621,6 +637,8 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Package))
                outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_Die))
+               outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Node))
                outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
 
        if (DO_BIC(BIC_CPU_c1))
                outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
        if (DO_BIC(BIC_CoreTmp))
                outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
 
+       if (do_rapl && !rapl_joules) {
+               if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+       } else if (do_rapl && rapl_joules) {
+               if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+       }
+
        for (mp = sys.cp; mp; mp = mp->next) {
                if (mp->format == FORMAT_RAW) {
                        if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
        if (do_rapl && !rapl_joules) {
                if (DO_BIC(BIC_PkgWatt))
                        outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_CorWatt))
+               if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFXWatt))
                        outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
        } else if (do_rapl && rapl_joules) {
                if (DO_BIC(BIC_Pkg_J))
                        outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_Cor_J))
+               if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFX_J))
                        outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "c6: %016llX\n", c->c6);
                outp += sprintf(outp, "c7: %016llX\n", c->c7);
                outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+               outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
 
                for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                        outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (t == &average.threads) {
                if (DO_BIC(BIC_Package))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_Die))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Node))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        else
                                outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                }
+               if (DO_BIC(BIC_Die)) {
+                       if (c)
+                               outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+                       else
+                               outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               }
                if (DO_BIC(BIC_Node)) {
                        if (t)
                                outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
        }
 
+       /*
+        * If measurement interval exceeds minimum RAPL Joule Counter range,
+        * indicate that results are suspect by printing "**" in fraction place.
+        */
+       if (interval_float < rapl_joule_counter_range)
+               fmt8 = "%s%.2f";
+       else
+               fmt8 = "%6.0f**";
+
+       if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+       if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
        /* print per-package data only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_SYS_LPI))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
 
-       /*
-        * If measurement interval exceeds minimum RAPL Joule Counter range,
-        * indicate that results are suspect by printing "**" in fraction place.
-        */
-       if (interval_float < rapl_joule_counter_range)
-               fmt8 = "%s%.2f";
-       else
-               fmt8 = "%6.0f**";
-
        if (DO_BIC(BIC_PkgWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-       if (DO_BIC(BIC_CorWatt))
+       if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
        if (DO_BIC(BIC_GFXWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
        if (DO_BIC(BIC_Pkg_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
-       if (DO_BIC(BIC_Cor_J))
+       if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
        if (DO_BIC(BIC_GFX_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
        old->core_temp_c = new->core_temp_c;
        old->mc6_us = new->mc6_us - old->mc6_us;
 
+       DELTA_WRAP32(new->core_energy, old->core_energy);
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        old->counter[i] = new->counter[i];
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        c->c7 = 0;
        c->mc6_us = 0;
        c->core_temp_c = 0;
+       c->core_energy = 0;
 
        p->pkg_wtd_core_c0 = 0;
        p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
        average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
 
+       average.cores.core_energy += c->core_energy;
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        continue;
@@ -1818,7 +1863,7 @@ retry:
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+       if (DO_BIC(BIC_CPU_c3)) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
@@ -1845,6 +1890,12 @@ retry:
                c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
        }
 
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+                       return -14;
+               c->core_energy = msr & 0xFFFFFFFF;
+       }
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (get_mp(cpu, mp, &c->counter[i]))
                        return -10;
@@ -1934,6 +1985,11 @@ retry:
                        return -16;
                p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
        }
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+                       return -13;
+               p->energy_pkg = msr & 0xFFFFFFFF;
+       }
        if (DO_BIC(BIC_PkgTmp)) {
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
                        return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
 
 /*
  * Parse a file containing a single int.
+ * Return 0 if file can not be opened
+ * Exit if file can be opened, but can not be parsed
  */
 int parse_int_file(const char *fmt, ...)
 {
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
        va_start(args, fmt);
        vsnprintf(path, sizeof(path), fmt, args);
        va_end(args);
-       filep = fopen_or_die(path, "r");
+       filep = fopen(path, "r");
+       if (!filep)
+               return 0;
        if (fscanf(filep, "%d", &value) != 1)
                err(1, "%s: failed to parse number from file", path);
        fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
+int get_die_id(int cpu)
+{
+       return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
 int get_core_id(int cpu)
 {
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
        filep = fopen_or_die(path, "r");
        do {
                offset -= BITMASK_SIZE;
-               fscanf(filep, "%lx%c", &map, &character);
+               if (fscanf(filep, "%lx%c", &map, &character) != 2)
+                       err(1, "%s: failed to parse file", path);
                for (shift = 0; shift < BITMASK_SIZE; shift++) {
                        if ((map >> shift) & 0x1) {
                                so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
-       if (retval != 1)
-               err(1, "CPU LPI");
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+               BIC_NOT_PRESENT(BIC_CPU_LPI);
+               return -1;
+       }
 
        fclose(fp);
 
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
-       if (retval != 1)
-               err(1, "SYS LPI");
-
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle System output\n");
+               BIC_NOT_PRESENT(BIC_SYS_LPI);
+               return -1;
+       }
        fclose(fp);
 
        return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
                        sp = strchrnul(name_buf, '\n');
                *sp = '\0';
-
                fclose(input);
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(desc, sizeof(desc), input);
+               if (!fgets(desc, sizeof(desc), input))
+                       err(1, "%s: failed to read file", path);
 
                fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
                fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(driver_buf, sizeof(driver_buf), input);
+       if (!fgets(driver_buf, sizeof(driver_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(governor_buf, sizeof(governor_buf), input);
+       if (!fgets(governor_buf, sizeof(governor_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq boost: %d\n", turbo);
                fclose(input);
        }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
                fclose(input);
        }
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
 #define        RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
 #define        RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
 
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
 {
        unsigned long long msr;
 
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
        }
 }
 
+double get_tdp_amd(unsigned int family)
+{
+       switch (family) {
+       case 0x17:
+       default:
+               /* This is the max stock TDP of HEDT/Server Fam17h chips */
+               return 250.0;
+       }
+}
+
 /*
  * rapl_dram_energy_units_probe()
  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        }
 }
 
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
 {
        unsigned long long msr;
        unsigned int time_unit;
        double tdp;
 
-       if (!genuine_intel)
-               return;
-
        if (family != 6)
                return;
 
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
 
        rapl_time_units = 1.0 / (1 << (time_unit));
 
-       tdp = get_tdp(model);
+       tdp = get_tdp_intel(model);
 
        rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
        if (!quiet)
                fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
 
-       return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+       unsigned long long msr;
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int has_rapl = 0;
+       double tdp;
+
+       if (max_extended_level >= 0x80000007) {
+               __cpuid(0x80000007, eax, ebx, ecx, edx);
+               /* RAPL (Fam 17h) */
+               has_rapl = edx & (1 << 14);
+       }
+
+       if (!has_rapl)
+               return;
+
+       switch (family) {
+       case 0x17: /* Zen, Zen+ */
+               do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+               }
+               break;
+       default:
+               return;
+       }
+
+       if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+               return;
+
+       rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+       rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+       rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+       tdp = get_tdp_amd(model);
+
+       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+       if (!quiet)
+               fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+       if (genuine_intel)
+               rapl_probe_intel(family, model);
+       if (authentic_amd)
+               rapl_probe_amd(family, model);
 }
 
 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
+       const char *msr_name;
        int cpu;
 
        if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
-       if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
-               return -1;
+       if (do_rapl & RAPL_AMD_F17H) {
+               msr_name = "MSR_RAPL_PWR_UNIT";
+               if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+                       return -1;
+       } else {
+               msr_name = "MSR_RAPL_POWER_UNIT";
+               if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+                       return -1;
+       }
 
-       fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+       fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
                rapl_power_units, rapl_energy_units, rapl_time_units);
 
        if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                return INTEL_FAM6_SKYLAKE_MOBILE;
+
+       case INTEL_FAM6_ICELAKE_MOBILE:
+               return INTEL_FAM6_CANNONLAKE_MOBILE;
        }
        return model;
 }
@@ -4702,7 +4846,9 @@ void process_cpuid()
        }
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
-       do_cnl_cstates = is_cnl(family, model);
+
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+               BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
                decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
        int i;
        int max_core_id = 0;
        int max_package_id = 0;
+       int max_die_id = 0;
        int max_siblings = 0;
 
        /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
                if (cpus[i].physical_package_id > max_package_id)
                        max_package_id = cpus[i].physical_package_id;
 
+               /* get die information */
+               cpus[i].die_id = get_die_id(i);
+               if (cpus[i].die_id > max_die_id)
+                       max_die_id = cpus[i].die_id;
+
                /* get numa node information */
                cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
                if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
        if (!summary_only && topo.cores_per_node > 1)
                BIC_PRESENT(BIC_Core);
 
+       topo.num_die = max_die_id + 1;
+       if (debug > 1)
+               fprintf(outf, "max_die_id %d, sizing for %d die\n",
+                               max_die_id, topo.num_die);
+       if (!summary_only && topo.num_die > 1)
+               BIC_PRESENT(BIC_Die);
+
        topo.num_packages = max_package_id + 1;
        if (debug > 1)
                fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
                if (cpu_is_not_present(i))
                        continue;
                fprintf(outf,
-                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
-                       i, cpus[i].physical_package_id,
+                       "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id, cpus[i].die_id,
                        cpus[i].physical_node_id,
                        cpus[i].logical_node_id,
                        cpus[i].physical_core_id,
@@ -5122,7 +5281,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.07.27"
+       fprintf(outf, "turbostat version 19.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5319,7 +5478,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
@@ -5346,7 +5506,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
index b579f962451d6464035c6649ac714998c05a225f..85ffdcfa596b5011b93abf3c65e90cd33cceb61f 100644 (file)
@@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 struct nfit_test_sec {
        u8 state;
        u8 ext_state;
+       u8 old_state;
        u8 passphrase[32];
        u8 master_passphrase[32];
        u64 overwrite_end_time;
@@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq;
 
 static struct gen_pool *nfit_pool;
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
        struct device *dev = &t->pdev.dev;
        struct nfit_test_sec *sec = &dimm_sec_info[dimm];
 
-       if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
-                       (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+       if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
                nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
                dev_dbg(dev, "secure erase: wrong security state\n");
        } else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
                nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
                dev_dbg(dev, "secure erase: wrong passphrase\n");
        } else {
+               if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+                               && (memcmp(nd_cmd->passphrase, zero_key,
+                                       ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+                       dev_dbg(dev, "invalid zero key\n");
+                       return 0;
+               }
                memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                sec->state = 0;
@@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
                return 0;
        }
 
-       memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+       sec->old_state = sec->state;
        sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
        dev_dbg(dev, "overwrite progressing.\n");
        sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
 
        if (time_is_before_jiffies64(sec->overwrite_end_time)) {
                sec->overwrite_end_time = 0;
-               sec->state = 0;
+               sec->state = sec->old_state;
+               sec->old_state = 0;
                sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
                dev_dbg(dev, "overwrite is complete\n");
        } else
index bcbd928c96aba4ab3a2a13984b1987f1497a412d..fc818bc1d7294454093a949b3e5f394ca9879982 100644 (file)
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
 };
 
+#define VLAN_HLEN      4
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       struct iphdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v4 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+       .iph.ihl = 5,
+       .iph.protocol = IPPROTO_TCP,
+       .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
+       .nhoff = VLAN_HLEN,
+       .thoff = VLAN_HLEN + sizeof(struct iphdr),
+       .addr_proto = ETH_P_IP,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       __u16 vlan_tci2;
+       __u16 vlan_proto2;
+       struct ipv6hdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v6 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+       .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+       .iph.nexthdr = IPPROTO_TCP,
+       .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
+       .nhoff = VLAN_HLEN * 2,
+       .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+       .addr_proto = ETH_P_IPV6,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
 void test_flow_dissector(void)
 {
        struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
              err, errno, retval, duration, size, sizeof(flow_keys));
        CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
 
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
+                       pkt_vlan_v4_flow_keys);
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
+                       pkt_vlan_v6_flow_keys);
+
        bpf_object__close(obj);
 }
index 284660f5aa9533aaee034e13242f5ea098729676..75b17cada53937e5e237e00049a1bc22176cb83c 100644 (file)
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
 
-       keys->n_proto = proto;
        switch (proto) {
        case bpf_htons(ETH_P_IP):
                bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 SEC("flow_dissector")
 int _dissect(struct __sk_buff *skb)
 {
-       if (!skb->vlan_present)
-               return parse_eth_proto(skb, skb->protocol);
-       else
-               return parse_eth_proto(skb, skb->vlan_proto);
+       struct bpf_flow_keys *keys = skb->flow_keys;
+
+       return parse_eth_proto(skb, keys->n_proto);
 }
 
 /* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct vlan_hdr *vlan, _vlan;
-       __be16 proto;
-
-       /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
-                              sizeof(proto)))
-               return BPF_DROP;
 
        /* Account for double-tagging */
-       if (proto == bpf_htons(ETH_P_8021AD)) {
+       if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
                vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
                if (!vlan)
                        return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
+               keys->nhoff += sizeof(*vlan);
                keys->thoff += sizeof(*vlan);
        }
 
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
        if (!vlan)
                return BPF_DROP;
 
+       keys->nhoff += sizeof(*vlan);
        keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
                return BPF_DROP;
 
+       keys->n_proto = vlan->h_vlan_encapsulated_proto;
        return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
 }
 
index 23e3b314ca603956ce88ed4ac8f1512eeddfa34f..ec5794e4205bc04dabcaa20532cfe4cd2fcf6bbd 100644 (file)
@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = {
                .dedup_table_size = 1, /* force hash collisions */
        },
 },
+{
+       .descr = "dedup: void equiv check",
+       /*
+        * // CU 1:
+        * struct s {
+        *      struct {} *x;
+        * };
+        * // CU 2:
+        * struct s {
+        *      int *x;
+        * };
+        */
+       .input = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+               .dedup_table_size = 1, /* force hash collisions */
+       },
+},
 {
        .descr = "dedup: all possible kinds (no duplicates)",
        .input = {
index f2ccae39ee66b32c8b60890dcacff0bd89c8abd0..fb11240b758b1a60f864473d4ee4caa1ff2932a4 100644 (file)
        .errstr = "call stack",
        .result = REJECT,
 },
+{
+       "calls: stack depth check in dead code",
+       .insns = {
+       /* main */
+       BPF_MOV64_IMM(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+       BPF_EXIT_INSN(),
+       /* A */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       /* B */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+       BPF_EXIT_INSN(),
+       /* C */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+       BPF_EXIT_INSN(),
+       /* D */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+       BPF_EXIT_INSN(),
+       /* E */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+       BPF_EXIT_INSN(),
+       /* F */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+       BPF_EXIT_INSN(),
+       /* G */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+       BPF_EXIT_INSN(),
+       /* H */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .errstr = "call stack",
+       .result = REJECT,
+},
 {
        "calls: spill into caller stack frame",
        .insns = {
index c4cf6e6d800ebe3d2d595805397ce1b3f70e7de3..a6c196c8534cea2a49d600aa23b39070bfd481f0 100755 (executable)
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
 
 ALL_TESTS="
        rif_set_addr_test
+       rif_vrf_set_addr_test
        rif_inherit_bridge_addr_test
        rif_non_inherit_bridge_addr_test
        vlan_interface_deletion_test
@@ -98,6 +99,25 @@ rif_set_addr_test()
        ip link set dev $swp1 addr $swp1_mac
 }
 
+rif_vrf_set_addr_test()
+{
+       # Test that it is possible to set an IP address on a VRF upper despite
+       # its random MAC address.
+       RET=0
+
+       ip link add name vrf-test type vrf table 10
+       ip link set dev $swp1 master vrf-test
+
+       ip -4 address add 192.0.2.1/24 dev vrf-test
+       check_err $? "failed to set IPv4 address on VRF"
+       ip -6 address add 2001:db8:1::1/64 dev vrf-test
+       check_err $? "failed to set IPv6 address on VRF"
+
+       log_test "RIF - setting IP address on VRF"
+
+       ip link del dev vrf-test
+}
+
 rif_inherit_bridge_addr_test()
 {
        RET=0
index 7514fcea91a73e80a91313ab280fb90e12375138..f8588cca2bef4bfe4d3cdf2afdb6586f21e67894 100644 (file)
@@ -1,3 +1,5 @@
+include ../../../../scripts/Kbuild.include
+
 all:
 
 top_srcdir = ../../../..
@@ -17,6 +19,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 
@@ -30,7 +33,11 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
 CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread -no-pie
+
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
index e2884c2b81fff80c1ec6c261828dbb0493b3e98b..6063d5b2f3561c450778f86f3d1474390d79b5ec 100644 (file)
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
+#define APIC_BASE_MSR  0x800
+#define X2APIC_ENABLE  (1UL << 10)
+#define        APIC_ICR        0x300
+#define                APIC_DEST_SELF          0x40000
+#define                APIC_DEST_ALLINC        0x80000
+#define                APIC_DEST_ALLBUT        0xC0000
+#define                APIC_ICR_RR_MASK        0x30000
+#define                APIC_ICR_RR_INVALID     0x00000
+#define                APIC_ICR_RR_INPROG      0x10000
+#define                APIC_ICR_RR_VALID       0x20000
+#define                APIC_INT_LEVELTRIG      0x08000
+#define                APIC_INT_ASSERT         0x04000
+#define                APIC_ICR_BUSY           0x01000
+#define                APIC_DEST_LOGICAL       0x00800
+#define                APIC_DEST_PHYSICAL      0x00000
+#define                APIC_DM_FIXED           0x00000
+#define                APIC_DM_FIXED_MASK      0x00700
+#define                APIC_DM_LOWEST          0x00100
+#define                APIC_DM_SMI             0x00200
+#define                APIC_DM_REMRD           0x00300
+#define                APIC_DM_NMI             0x00400
+#define                APIC_DM_INIT            0x00500
+#define                APIC_DM_STARTUP         0x00600
+#define                APIC_DM_EXTINT          0x00700
+#define                APIC_VECTOR_MASK        0x000FF
+#define        APIC_ICR2       0x310
+
 #define MSR_IA32_TSCDEADLINE           0x000006e0
 
 #define MSR_IA32_UCODE_WRITE           0x00000079
index efa0aad8b3c69ab370a1f5440194cee3486c11db..4ca96b228e46ba248476803583cb94d14410ff16 100644 (file)
@@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
        if (vm->kvm_fd < 0)
                exit(KSFT_SKIP);
 
+       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+               fprintf(stderr, "immediate_exit not available, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
        vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
        TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
                "rc: %i errno: %i", vm->fd, errno);
index f28127f4a3af63cb9ac15d2124f425e7492fccda..dc7fae9fa424cf2b45fb7acf10c4b58c272763a0 100644 (file)
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
                            nested_size, sizeof(state->nested_));
        }
 
+       /*
+        * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+        * guest state is consistent only after userspace re-enters the
+        * kernel with KVM_RUN.  Complete IO prior to migrating state
+        * to a new VM.
+        */
+       vcpu_run_complete_io(vm, vcpuid);
+
        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;
 
-       if (state->nested.size) {
-               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
-               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
-                       r);
-       }
-
        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                 r);
+
+       if (state->nested.size) {
+               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+                       r);
+       }
 }
index c49c2a28b0eb290ccd6c51498a0b9fd716b58b07..36669684eca58a6c09140453f70a403cf0119348 100644 (file)
@@ -123,8 +123,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,6 +142,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644 (file)
index 0000000..fb80869
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID              1
+
+#define PAGE_SIZE  4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code, however, SMI handler is executed
+ * in real-address mode. To stay simple we're limiting ourselves to a mode
+ * independent subset of asm here.
+ * The SMI handler always reports back the fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+       0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
+       0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
+       0x0f, 0xaa,           /* rsm */
+};
+
+void sync_with_host(uint64_t phase)
+{
+       asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+                    : : "a" (phase));
+}
+
+void self_smi(void)
+{
+       wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+             APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+       uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+       sync_with_host(1);
+
+       wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+       sync_with_host(2);
+
+       self_smi();
+
+       sync_with_host(4);
+
+       if (vmx_pages) {
+               GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+               sync_with_host(5);
+
+               self_smi();
+
+               sync_with_host(7);
+       }
+
+       sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+       struct vmx_pages *vmx_pages = NULL;
+       vm_vaddr_t vmx_pages_gva = 0;
+
+       struct kvm_regs regs;
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct kvm_x86_state *state;
+       int stage, stage_reported;
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+                                   SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+       TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+                   == SMRAM_GPA, "could not allocate guest physical addresses?");
+
+       memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+       memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+              sizeof(smi_handler));
+
+       vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+       if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+               vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+               vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       } else {
+               printf("will skip SMM test with VMX enabled\n");
+               vcpu_args_set(vm, VCPU_ID, 1, 0);
+       }
+
+       for (stage = 1;; stage++) {
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Stage %d: unexpected exit reason: %u (%s),\n",
+                           stage, run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               memset(&regs, 0, sizeof(regs));
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+
+               stage_reported = regs.rax & 0xff;
+
+               if (stage_reported == DONE)
+                       goto done;
+
+               TEST_ASSERT(stage_reported == stage ||
+                           stage_reported == SMRAM_STAGE,
+                           "Unexpected stage: #%x, got %x",
+                           stage, stage_reported);
+
+               state = vcpu_save_state(vm, VCPU_ID);
+               kvm_vm_release(vm);
+               kvm_vm_restart(vm, O_RDWR);
+               vm_vcpu_add(vm, VCPU_ID, 0, 0);
+               vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+               vcpu_load_state(vm, VCPU_ID, state);
+               run = vcpu_state(vm, VCPU_ID);
+               free(state);
+       }
+
+done:
+       kvm_vm_free(vm);
+}
index 30f75856cf3984277bee22caad9e5df95f98aa26..e0a3c0204b7cd11c5da7024bea68f0da71e41bab 100644 (file)
@@ -134,11 +134,6 @@ int main(int argc, char *argv[])
 
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
-               fprintf(stderr, "immediate_exit not available, skipping test\n");
-               exit(KSFT_SKIP);
-       }
-
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -179,18 +174,10 @@ int main(int argc, char *argv[])
                            uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
                            stage, (ulong)uc.args[1]);
 
-               /*
-                * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
-                * guest state is consistent only after userspace re-enters the
-                * kernel with KVM_RUN.  Complete IO prior to migrating state
-                * to a new VM.
-                */
-               vcpu_run_complete_io(vm, VCPU_ID);
-
+               state = vcpu_save_state(vm, VCPU_ID);
                memset(&regs1, 0, sizeof(regs1));
                vcpu_regs_get(vm, VCPU_ID, &regs1);
 
-               state = vcpu_save_state(vm, VCPU_ID);
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
index 1080ff55a788f720f240271741fbc38680061b7a..0d2a5f4f1e63829f3ca8dfcd8695b91409823f7f 100755 (executable)
@@ -605,6 +605,39 @@ run_cmd()
        return $rc
 }
 
+check_expected()
+{
+       local out="$1"
+       local expected="$2"
+       local rc=0
+
+       [ "${out}" = "${expected}" ] && return 0
+
+       if [ -z "${out}" ]; then
+               if [ "$VERBOSE" = "1" ]; then
+                       printf "\nNo route entry found\n"
+                       printf "Expected:\n"
+                       printf "    ${expected}\n"
+               fi
+               return 1
+       fi
+
+       # tricky way to convert output to 1-line without ip's
+       # messy '\'; this drops all extra white space
+       out=$(echo ${out})
+       if [ "${out}" != "${expected}" ]; then
+               rc=1
+               if [ "${VERBOSE}" = "1" ]; then
+                       printf "    Unexpected route entry. Have:\n"
+                       printf "        ${out}\n"
+                       printf "    Expected:\n"
+                       printf "        ${expected}\n\n"
+               fi
+       fi
+
+       return $rc
+}
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route6()
@@ -652,31 +685,7 @@ check_route6()
        pfx=$1
 
        out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 route_cleanup()
@@ -725,7 +734,7 @@ route_setup()
        ip -netns ns2 addr add 172.16.103.2/24 dev veth4
        ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
 
-       set +ex
+       set +e
 }
 
 # assumption is that basic add of a single path route works
@@ -960,7 +969,8 @@ ipv6_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route6 ""
+               out=$($IP -6 ro ls match 2001:db8:104::/64)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1101,13 @@ check_route()
        local pfx
        local expected="$1"
        local out
-       local rc=0
 
        set -- $expected
        pfx=$1
        [ "${pfx}" = "unreachable" ] && pfx=$2
 
        out=$($IP ro ls match ${pfx})
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 # assumption is that basic add of a single path route works
@@ -1387,7 +1372,8 @@ ipv4_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route ""
+               out=$($IP ro ls match 172.16.104.0/24)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
index 7202bbac976ea2b718952421b995ab593ce15fca..853aa164a401e054914cccf2a9663111e1c83939 100644 (file)
@@ -187,8 +187,8 @@ static int make_exe(const uint8_t *payload, size_t len)
        ph.p_offset = 0;
        ph.p_vaddr = VADDR;
        ph.p_paddr = 0;
-       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
-       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
        ph.p_align = 4096;
 
        fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
index 762cb01f2ca719da36873484ef3a3489a6f71b3d..47b7473dedef74ccefead246ec19f20159c78958 100644 (file)
@@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 
 int main(void)
 {
-       const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
-#ifdef __arm__
-       unsigned long va = 2 * PAGE_SIZE;
-#else
-       unsigned long va = 0;
-#endif
+       const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+       const unsigned long va_max = 1UL << 32;
+       unsigned long va;
        void *p;
        int fd;
        unsigned long a, b;
@@ -60,10 +57,13 @@ int main(void)
        if (fd == -1)
                return 1;
 
-       p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
-       if (p == MAP_FAILED) {
-               if (errno == EPERM)
-                       return 4;
+       for (va = 0; va < va_max; va += PAGE_SIZE) {
+               p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+               if (p == (void *)va)
+                       break;
+       }
+       if (va == va_max) {
+               fprintf(stderr, "error: mmap doesn't like you\n");
                return 1;
        }
 
index 27f0acaed880e765e9829306b1cc3a28e2755cbb..ddabb160a11bacc151b9891f14f7da62846b8ed1 100644 (file)
             "$TC actions flush action sample"
         ]
     },
+    {
+        "id": "7571",
+        "name": "Add sample action with invalid rate",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action sample index 2",
+        "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
+    },
     {
         "id": "b6d4",
         "name": "Add sample action with mandatory arguments and invalid control action",
index 99a5ffca1088ad52cdac7842739eaefc40f99c07..2d096b2abf2c943064ae86e674d2a7a65f49757d 100644 (file)
             "$TC qdisc del dev $DEV1 ingress"
         ]
     },
+    {
+        "id": "2638",
+        "name": "Add matchall and try to get it",
+        "category": [
+            "filter",
+            "matchall"
+        ],
+        "setup": [
+            "$TC qdisc add dev $DEV1 clsact",
+            "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
+        ],
+        "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 clsact"
+        ]
+    },
     {
         "id": "d052",
         "name": "Add 1M filters with the same action",
index 40ea95ce2eadacf07f0dd8b197f94e91a9138b64..828c185846248031ff598670d393e34a38fa24df 100644 (file)
@@ -22,6 +22,7 @@ TPM2_CC_UNSEAL = 0x015E
 TPM2_CC_FLUSH_CONTEXT = 0x0165
 TPM2_CC_START_AUTH_SESSION = 0x0176
 TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_GET_RANDOM = 0x017B
 TPM2_CC_PCR_READ = 0x017E
 TPM2_CC_POLICY_PCR = 0x017F
 TPM2_CC_PCR_EXTEND = 0x0182
@@ -357,9 +358,9 @@ class Client:
         self.flags = flags
 
         if (self.flags & Client.FLAG_SPACE) == 0:
-            self.tpm = open('/dev/tpm0', 'r+b')
+            self.tpm = open('/dev/tpm0', 'r+b', buffering=0)
         else:
-            self.tpm = open('/dev/tpmrm0', 'r+b')
+            self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
 
     def close(self):
         self.tpm.close()
index 3bb066fea4a01171f31f80df653c3a046c35acdd..d4973be53493226b19dcca5e7140e997ea964e8b 100644 (file)
@@ -158,6 +158,69 @@ class SmokeTest(unittest.TestCase):
             pass
         self.assertEqual(rejected, True)
 
+    def test_read_partial_resp(self):
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            hdr = self.client.tpm.read(10)
+            sz = struct.unpack('>I', hdr[2:6])[0]
+            rsp = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(sz, 10 + 2 + 32)
+        self.assertEqual(len(rsp), 2 + 32)
+
+    def test_read_partial_overwrite(self):
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            # Read part of the respone
+            rsp1 = self.client.tpm.read(15)
+
+            # Send a new cmd
+            self.client.tpm.write(cmd)
+
+            # Read the whole respone
+            rsp2 = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(len(rsp1), 15)
+        self.assertEqual(len(rsp2), 10 + 2 + 32)
+
+    def test_send_two_cmds(self):
+        rejected = False
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+
+            # expect the second one to raise -EBUSY error
+            self.client.tpm.write(cmd)
+            rsp = self.client.tpm.read()
+
+        except IOError, e:
+            # read the response
+            rsp = self.client.tpm.read()
+            rejected = True
+            pass
+        except:
+            pass
+        self.assertEqual(rejected, True)
+
 class SpaceTest(unittest.TestCase):
     def setUp(self):
         logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
index 3547b0d8c91ea2c84e0869b769e9947829fe4286..79e59e4fa3dc6be751079e669e214b7fc614e07f 100644 (file)
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
 {
        struct kvm_kernel_irq_routing_entry *ei;
        int r;
+       u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
 
        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and non-irqchip routing.
         */
-       hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+       hlist_for_each_entry(ei, &rt->map[gsi], link)
                if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return -EINVAL;
 
-       e->gsi = ue->gsi;
+       e->gsi = gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(kvm, e, ue);
        if (r)
index 55fe8e20d8fd9b7367619a250dde9076a74bdc6e..dc8edc97ba850384680b56f88063f61bebfc96c8 100644 (file)
@@ -2977,12 +2977,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        struct kvm_device_ops *ops = NULL;
        struct kvm_device *dev;
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+       int type;
        int ret;
 
        if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
                return -ENODEV;
 
-       ops = kvm_device_ops_table[cd->type];
+       type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+       ops = kvm_device_ops_table[type];
        if (ops == NULL)
                return -ENODEV;
 
@@ -2997,7 +2999,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->kvm = kvm;
 
        mutex_lock(&kvm->lock);
-       ret = ops->create(dev, cd->type);
+       ret = ops->create(dev, type);
        if (ret < 0) {
                mutex_unlock(&kvm->lock);
                kfree(dev);