Merge tag 'spi-fix-v5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2021 17:04:39 +0000 (10:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2021 17:04:39 +0000 (10:04 -0700)
Pull spi fixes from Mark Brown:
 "A couple of final driver specific fixes for v5.15, one fixing
  potential ID collisions between two instances of the Altera driver and
  one making Microwire full duplex mode actually work on pl022"

* tag 'spi-fix-v5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: spl022: fix Microwire full duplex mode
  spi: altera: Change to dynamic allocation of spi id

1170 files changed:
.mailmap
CREDITS
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
Documentation/devicetree/bindings/mfd/brcm,cru.yaml
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml
Documentation/filesystems/ntfs3.rst
Documentation/gpu/amdgpu.rst
Documentation/gpu/drm-internals.rst
Documentation/hwmon/k10temp.rst
Documentation/networking/devlink/ice.rst
Documentation/networking/mctp.rst
Documentation/userspace-api/vduse.rst
MAINTAINERS
Makefile
arch/arc/include/asm/pgtable.h
arch/arm/Kconfig
arch/arm/boot/compressed/decompress.c
arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
arch/arm/boot/dts/at91-sama7g5ek.dts
arch/arm/boot/dts/bcm2711-rpi-4-b.dts
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/bcm2835-common.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/imx53-m53menlo.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-pico.dtsi
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
arch/arm/boot/dts/omap3430-sdp.dts
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/sama7g5.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
arch/arm/boot/dts/vexpress-v2m.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
arch/arm/boot/dts/vexpress-v2p-ca9.dts
arch/arm/common/sharpsl_param.c
arch/arm/configs/gemini_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/oxnas_v6_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/include/asm/uaccess.h
arch/arm/kernel/head.S
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux-xip.lds.S
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm_suspend.S
arch/arm/mach-dove/include/mach/uncompress.h
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-imx/src.c
arch/arm/mach-omap1/include/mach/memory.h
arch/arm/mach-omap1/usb.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mm/proc-macros.S
arch/arm/net/bpf_jit_32.c
arch/arm/probes/kprobes/core.c
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
arch/arm64/boot/dts/arm/foundation-v8.dtsi
arch/arm64/boot/dts/arm/fvp-base-revc.dts
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dts
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
arch/arm64/boot/dts/freescale/imx8mq-evk.dts
arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
arch/arm64/boot/dts/qcom/pm8150.dtsi
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm630.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/qcom/sm8250.dtsi
arch/arm64/configs/defconfig
arch/arm64/kvm/hyp/include/nvhe/gfp.h
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/page_alloc.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/perf.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/mm/hugetlbpage.c
arch/csky/Kconfig
arch/csky/include/asm/bitops.h
arch/csky/kernel/ptrace.c
arch/csky/kernel/signal.c
arch/ia64/Kconfig
arch/m68k/68000/entry.S
arch/m68k/Kconfig
arch/m68k/coldfire/entry.S
arch/m68k/include/asm/processor.h
arch/m68k/include/asm/segment.h [deleted file]
arch/m68k/include/asm/thread_info.h
arch/m68k/include/asm/tlbflush.h
arch/m68k/include/asm/traps.h
arch/m68k/include/asm/uaccess.h
arch/m68k/kernel/asm-offsets.c
arch/m68k/kernel/entry.S
arch/m68k/kernel/process.c
arch/m68k/kernel/signal.c
arch/m68k/kernel/traps.c
arch/m68k/mac/misc.c
arch/m68k/mm/cache.c
arch/m68k/mm/init.c
arch/m68k/mm/kmap.c
arch/m68k/mm/memory.c
arch/m68k/mm/motorola.c
arch/m68k/sun3/config.c
arch/m68k/sun3/mmu_emu.c
arch/m68k/sun3/sun3ints.c
arch/m68k/sun3x/prom.c
arch/mips/Kconfig
arch/mips/include/asm/mips-cps.h
arch/mips/net/bpf_jit.c
arch/nds32/kernel/ftrace.c
arch/nios2/Kconfig.debug
arch/nios2/include/asm/irqflags.h
arch/nios2/include/asm/registers.h
arch/nios2/kernel/setup.c
arch/nios2/platform/Kconfig.platform
arch/parisc/Kconfig
arch/powerpc/boot/dts/fsl/t1023rdb.dts
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/security.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/lib/code-patching.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/sysdev/xive/common.c
arch/riscv/Kconfig
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/unistd.h
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso.c
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/mm/cacheflush.c
arch/s390/include/asm/pci.h
arch/s390/kvm/gaccess.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/lib/string.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/x86/Kconfig
arch/x86/crypto/sm4-aesni-avx-asm_64.S
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/msr.c
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_page_track.h
arch/x86/include/asm/kvmclock.h
arch/x86/include/asm/xen/pci.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/sev-shared.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/evmcs.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/xen.c
arch/x86/platform/olpc/olpc.c
arch/x86/platform/pvh/enlighten.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/kmem_layout.h
arch/xtensa/kernel/irq.c
arch/xtensa/kernel/setup.c
arch/xtensa/mm/mmu.c
arch/xtensa/platforms/xtfpga/setup.c
block/bdev.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk.h
block/genhd.c
block/kyber-iosched.c
block/partitions/core.c
drivers/Kconfig
drivers/acpi/arm64/gtdt.c
drivers/acpi/nfit/core.c
drivers/acpi/power.c
drivers/acpi/tables.c
drivers/acpi/x86/s2idle.c
drivers/ata/libahci_platform.c
drivers/ata/pata_legacy.c
drivers/ata/sata_mv.c
drivers/base/core.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/test/Makefile
drivers/block/brd.c
drivers/block/nbd.c
drivers/block/rnbd/rnbd-clt-sysfs.c
drivers/block/virtio_blk.c
drivers/bus/Kconfig
drivers/bus/Makefile
drivers/bus/simple-pm-bus.c
drivers/bus/ti-sysc.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/gcc-sm6115.c
drivers/clk/renesas/r9a07g044-cpg.c
drivers/clk/renesas/rzg2l-cpg.c
drivers/clk/socfpga/clk-agilex.c
drivers/crypto/ccp/ccp-ops.c
drivers/edac/armada_xp_edac.c
drivers/firmware/Kconfig
drivers/firmware/arm_ffa/bus.c
drivers/firmware/arm_scmi/Kconfig
drivers/firmware/arm_scmi/virtio.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/runtime-wrappers.c
drivers/fpga/ice40-spi.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-rockchip.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/hyperv/hyperv_drm.h
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
drivers/gpu/drm/hyperv/hyperv_drm_proto.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/kmb/kmb_crtc.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_dsi.c
drivers/gpu/drm/kmb/kmb_dsi.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/kmb/kmb_plane.h
drivers/gpu/drm/kmb/kmb_regs.h
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/edp/edp_ctrl.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/nouveau/dispnv50/crc.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
drivers/gpu/drm/r128/ati_pcigart.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rcar-du/rcar_lvds.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/uapi.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/host1x/fence.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/hid-apple.c
drivers/hid/hid-betopff.c
drivers/hid/hid-u2fzero.c
drivers/hid/wacom_wac.c
drivers/hv/hyperv_vmbus.h
drivers/hwmon/k10temp.c
drivers/hwmon/ltc2947-core.c
drivers/hwmon/mlxreg-fan.c
drivers/hwmon/occ/common.c
drivers/hwmon/pmbus/ibm-cffps.c
drivers/hwmon/pmbus/mp2975.c
drivers/hwmon/tmp421.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/i2c-core-acpi.c
drivers/iio/accel/fxls8962af-core.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad7780.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/max1027.c
drivers/iio/adc/mt6577_auxadc.c
drivers/iio/adc/rzg2l_adc.c
drivers/iio/adc/ti-adc128s052.c
drivers/iio/common/ssp_sensors/ssp_spi.c
drivers/iio/dac/ti-dac5571.c
drivers/iio/imu/adis16475.c
drivers/iio/imu/adis16480.c
drivers/iio/light/opt3001.c
drivers/iio/test/Makefile
drivers/infiniband/core/cma.c
drivers/infiniband/core/cma_priv.h
drivers/infiniband/core/sa_query.c
drivers/infiniband/hw/hfi1/ipoib_tx.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hns/hns_roce_cq.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/irdma/cm.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/i40iw_if.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/user.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/ws.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_iw_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/usnic/usnic_ib.h
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/snvs_pwrkey.c
drivers/input/touchscreen.c
drivers/input/touchscreen/resistive-adc-touch.c
drivers/interconnect/qcom/sdm660.c
drivers/iommu/Kconfig
drivers/iommu/apple-dart.c
drivers/iommu/arm/arm-smmu/Makefile
drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
drivers/iommu/intel/dmar.c
drivers/ipack/devices/ipoctal.c
drivers/isdn/capi/kcapi.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/netjet.c
drivers/md/dm-clone-target.c
drivers/md/dm-rq.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/media/platform/Kconfig
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/s5p-jpeg/jpeg-core.h
drivers/media/rc/ir_toy.c
drivers/misc/Kconfig
drivers/misc/cb710/sgbuf2.c
drivers/misc/eeprom/at25.c
drivers/misc/eeprom/eeprom_93xx46.c
drivers/misc/fastrpc.c
drivers/misc/gehc-achc.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/mmc/host/Kconfig
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/net/can/m_can/m_can_platform.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/dsa/sja1105/sja1105_ptp.h
drivers/net/ethernet/Kconfig
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_devids.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_sched.h
drivers/net/ethernet/intel/igc/igc_hw.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/micrel/Makefile
drivers/net/ethernet/micrel/ks8851_common.c
drivers/net/ethernet/microchip/encx24j600-regmap.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/microchip/encx24j600_hw.h
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/mscc/ocelot_vcap.c
drivers/net/ethernet/mscc/ocelot_vsc7514.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/nfp_asm.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
drivers/net/ethernet/pensando/ionic/ionic_stats.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/sfc/mcdi_port_common.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/Kconfig
drivers/net/hamradio/Kconfig
drivers/net/hamradio/baycom_epp.c
drivers/net/ipa/Kconfig
drivers/net/mdio/mdio-ipq4019.c
drivers/net/mdio/mdio-mscc-miim.c
drivers/net/mhi_net.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/mxl-gpy.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp.c
drivers/net/usb/Kconfig
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/led.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/sta_tx.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/nfc/st95hf/core.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/of_reserved_mem.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pci/msi.c
drivers/pci/pci-acpi.c
drivers/perf/arm_pmu.c
drivers/pinctrl/bcm/pinctrl-ns.c
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-amd.h
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-rockchip.h
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/platform/mellanox/mlxreg-io.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/int1092/intel_sar.c
drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
drivers/platform/x86/intel_scu_ipc.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_kvm_x86.c
drivers/ptp/ptp_pch.c
drivers/reset/Kconfig
drivers/reset/reset-brcmstb-rescal.c
drivers/reset/reset-socfpga.c
drivers/reset/tegra/reset-bpmp.c
drivers/s390/cio/blacklist.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/crypto/vfio_ap_ops.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/elx/efct/efct_scsi.c
drivers/scsi/hosts.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/virtio_scsi.c
drivers/soc/canaan/Kconfig
drivers/soc/qcom/mdt_loader.c
drivers/soc/qcom/socinfo.c
drivers/soc/ti/omap_prm.c
drivers/spi/spi-tegra20-slink.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
drivers/staging/media/hantro/hantro_drv.c
drivers/staging/media/sunxi/cedrus/cedrus_video.c
drivers/staging/r8188eu/hal/hal_intf.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/tee/optee/core.c
drivers/tee/optee/device.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/shm_pool.c
drivers/thunderbolt/Makefile
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/8250/Kconfig
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/common/Kconfig
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/host/ohci-omap.c
drivers/usb/host/xhci-dbgtty.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/typec/tcpm/tcpci.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tipd/core.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vhost/vdpa.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/gbefb.c
drivers/virtio/virtio.c
drivers/virtio/virtio_ring.c
drivers/watchdog/Kconfig
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/sbsa_gwdt.c
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/privcmd.c
fs/9p/cache.c
fs/9p/fid.c
fs/9p/v9fs.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/afs/dir_silly.c
fs/afs/write.c
fs/autofs/waitq.c
fs/binfmt_elf.c
fs/btrfs/ctree.h
fs/btrfs/dir-item.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/tree-log.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/ceph/super.h
fs/debugfs/inode.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fscache/object.c
fs/fscache/operation.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/virtio_fs.c
fs/io-wq.c
fs/io_uring.c
fs/kernel_read_file.c
fs/kernfs/dir.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/crypto_ctx.c
fs/ksmbd/crypto_ctx.h
fs/ksmbd/glob.h
fs/ksmbd/ksmbd_netlink.h
fs/ksmbd/mgmt/user_config.c
fs/ksmbd/mgmt/user_config.h
fs/ksmbd/misc.c
fs/ksmbd/oplock.c
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
fs/ksmbd/smbacl.c
fs/ksmbd/transport_ipc.c
fs/ksmbd/transport_ipc.h
fs/ksmbd/transport_rdma.c
fs/ksmbd/transport_tcp.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs.h
fs/netfs/read_helper.c
fs/nfs_common/grace.c
fs/nfsd/filecache.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/ntfs3/attrib.c
fs/ntfs3/attrlist.c
fs/ntfs3/bitfunc.c
fs/ntfs3/bitmap.c
fs/ntfs3/debug.h
fs/ntfs3/dir.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/fsntfs.c
fs/ntfs3/index.c
fs/ntfs3/inode.c
fs/ntfs3/lib/decompress_common.h
fs/ntfs3/lib/lib.h
fs/ntfs3/lznt.c
fs/ntfs3/namei.c
fs/ntfs3/ntfs.h
fs/ntfs3/ntfs_fs.h
fs/ntfs3/record.c
fs/ntfs3/run.c
fs/ntfs3/super.c
fs/ntfs3/upcase.c
fs/ntfs3/xattr.c
fs/ocfs2/alloc.c
fs/ocfs2/super.c
fs/overlayfs/dir.c
fs/overlayfs/file.c
fs/userfaultfd.c
fs/vboxsf/super.c
fs/verity/enable.c
fs/verity/open.c
include/acpi/platform/acgcc.h
include/asm-generic/io.h
include/kunit/test.h
include/kvm/arm_pmu.h
include/linux/arm-smccc.h
include/linux/bpf.h
include/linux/cpuhotplug.h
include/linux/cpumask.h
include/linux/dsa/mv88e6xxx.h [new file with mode: 0644]
include/linux/dsa/ocelot.h
include/linux/dsa/sja1105.h
include/linux/elfcore.h
include/linux/etherdevice.h
include/linux/fwnode.h
include/linux/genhd.h
include/linux/kvm_host.h
include/linux/memory.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/perf/arm_pmu.h
include/linux/perf_event.h
include/linux/platform_data/usb-omap1.h
include/linux/qcom_scm.h
include/linux/sched.h
include/linux/secretmem.h
include/linux/trace_recursion.h
include/linux/user_namespace.h
include/linux/workqueue.h
include/net/ip_fib.h
include/net/mac80211.h
include/net/mctp.h
include/net/netfilter/ipv6/nf_defrag_ipv6.h
include/net/netfilter/nf_tables.h
include/net/netns/netfilter.h
include/net/nexthop.h
include/net/pkt_sched.h
include/net/sctp/sm.h
include/net/sock.h
include/net/tcp.h
include/soc/mscc/ocelot.h
include/soc/mscc/ocelot_ptp.h
include/soc/mscc/ocelot_vcap.h
include/sound/hda_codec.h
include/sound/rawmidi.h
include/trace/events/cachefiles.h
include/trace/events/kyber.h
include/uapi/linux/hyperv.h
include/uapi/linux/mctp.h
include/uapi/linux/xfrm.h
include/uapi/misc/habanalabs.h
include/uapi/sound/asound.h
include/xen/xen-ops.h
init/main.c
kernel/auditsc.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/core.c
kernel/bpf/stackmap.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/cred.c
kernel/dma/debug.c
kernel/dma/debug.h
kernel/dma/mapping.c
kernel/events/core.c
kernel/module.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/signal.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_eprobe.c
kernel/trace/trace_events_hist.c
kernel/ucount.c
kernel/workqueue.c
lib/Makefile
lib/kunit/executor_test.c
mm/huge_memory.c
mm/memblock.c
mm/mempolicy.c
mm/migrate.c
mm/page_ext.c
mm/secretmem.c
mm/slab.c
mm/slub.c
net/bpf/test_run.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/netfilter/ebtables.c
net/can/isotp.c
net/can/j1939/j1939-priv.h
net/can/j1939/main.c
net/can/j1939/transport.c
net/core/dev_addr_lists.c
net/core/net-procfs.c
net/core/rtnetlink.c
net/core/sock.c
net/dsa/Kconfig
net/dsa/dsa2.c
net/dsa/switch.c
net/dsa/tag_dsa.c
net/dsa/tag_ocelot.c
net/dsa/tag_ocelot_8021q.c
net/dsa/tag_sja1105.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/inet_hashtables.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ioam6.c
net/ipv6/ioam6_iptunnel.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_rt.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_ps.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/wpa.c
net/mptcp/mptcp_diag.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/mptcp/syncookies.c
net/mptcp/token.c
net/mptcp/token_test.c
net/netfilter/Kconfig
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_masquerade.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_chain_filter.c
net/netfilter/nft_compat.c
net/netfilter/nft_quota.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NFLOG.c
net/netlink/af_netlink.c
net/nfc/af_nfc.c
net/nfc/digital_core.c
net/nfc/digital_technology.c
net/nfc/nci/rsp.c
net/sched/act_ct.c
net/sched/cls_flower.c
net/sched/sch_api.c
net/sched/sch_fifo.c
net/sched/sch_mqprio.c
net/sched/sch_taprio.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/smc/smc_cdc.c
net/smc/smc_core.c
net/smc/smc_llc.c
net/smc/smc_tx.c
net/smc/smc_wr.h
net/sunrpc/auth_gss/svcauth_gss.c
net/unix/af_unix.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_insn.h
samples/bpf/xdp_redirect_map_multi.bpf.c
scripts/Makefile.gcc-plugins
scripts/checksyscalls.sh
scripts/recordmcount.pl
security/keys/process_keys.c
security/selinux/nlmsgtab.c
sound/core/pcm_compat.c
sound/core/rawmidi.c
sound/core/seq_device.c
sound/drivers/pcsp/pcsp_lib.c
sound/firewire/motu/amdtp-motu.c
sound/firewire/oxfw/oxfw.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_realtek.c
sound/pci/pcxhr/pcxhr_core.c
sound/soc/codecs/Kconfig
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs4341.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/pcm179x-spi.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/fsl_xcvr.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/common/mtk-afe-fe-dai.c
sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/sof/core.c
sound/soc/sof/imx/imx8.c
sound/soc/sof/imx/imx8m.c
sound/soc/sof/loader.c
sound/soc/sof/trace.c
sound/soc/sof/xtensa/core.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/include/uapi/sound/asound.h
tools/kvm/kvm_stat/kvm_stat
tools/lib/bpf/libbpf.c
tools/lib/bpf/linker.c
tools/lib/bpf/strset.c
tools/lib/perf/tests/test-evlist.c
tools/lib/perf/tests/test-evsel.c
tools/objtool/arch/x86/decode.c
tools/objtool/check.c
tools/objtool/elf.c
tools/objtool/include/objtool/elf.h
tools/objtool/orc_gen.c
tools/objtool/special.c
tools/perf/Documentation/jitdump-specification.txt
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-intel-pt.txt
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/topdown.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/auxtrace.c
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm/util/perf_regs.c
tools/perf/arch/arm/util/pmu.c
tools/perf/arch/arm/util/unwind-libdw.c
tools/perf/arch/arm/util/unwind-libunwind.c
tools/perf/arch/x86/util/iostat.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/powerpc/power8/other.json
tools/perf/pmu-events/jevents.c
tools/perf/tests/attr/test-stat-default
tools/perf/tests/attr/test-stat-detailed-1
tools/perf/tests/attr/test-stat-detailed-2
tools/perf/tests/attr/test-stat-detailed-3
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/util/config.c
tools/perf/util/session.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_lwt_ip_encap.sh
tools/testing/selftests/drivers/dma-buf/udmabuf.c
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/access_tracking_perf_test.c
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/rseq_test.c
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c [new file with mode: 0644]
tools/testing/selftests/net/config
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/forwarding/Makefile
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/ioam6.sh
tools/testing/selftests/net/ioam6_parser.c
tools/testing/selftests/net/nettest.c
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/netfilter/nft_nat_zones.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_zones_many.sh [new file with mode: 0755]
tools/testing/selftests/vm/userfaultfd.c
tools/testing/vsock/vsock_diag_test.c
virt/kvm/kvm_main.c

index 6e849110cb4e3ad65e2b01eb1b7d2b6fe96c3215..90e614d2bf7e3dd34c1718f89e4d7756e639fb30 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,8 @@ Al Viro <viro@zenIV.linux.org.uk>
 Andi Kleen <ak@linux.intel.com> <ak@suse.de>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrej Shadura <andrew.shadura@collabora.co.uk>
+Andrej Shadura <andrew@shadura.me> <andrew@beldisplaytech.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
diff --git a/CREDITS b/CREDITS
index 7ef7b136e71d2d9dbc224aa435cb824a1aa1c65e..d8f63e8329e8fccbb8cc53d8d15a5e18c5390d64 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -971,6 +971,7 @@ D: PowerPC
 N: Daniel Drake
 E: dsd@gentoo.org
 D: USBAT02 CompactFlash support in usb-storage
+D: ZD1211RW wireless driver
 S: UK
 
 N: Oleg Drokin
index babbe04c8d37d8a9c95c93a6916c47137672c816..4d8c27eca96be72ea0df121978ad04dd588e6ae3 100644 (file)
@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.
 
        Note that all fields in this file are hierarchical and the
        file modified event can be generated due to an event down the
-       hierarchy. For for the local events at the cgroup level see
+       hierarchy. For the local events at the cgroup level see
        memory.events.local.
 
          low
@@ -2170,19 +2170,19 @@ existing device files.
 
 Cgroup v2 device controller has no interface files and is implemented
 on top of cgroup BPF. To control access to device files, a user may
-create bpf programs of the BPF_CGROUP_DEVICE type and attach them
-to cgroups. On an attempt to access a device file, corresponding
-BPF programs will be executed, and depending on the return value
-the attempt will succeed or fail with -EPERM.
-
-A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
-structure, which describes the device access attempt: access type
-(mknod/read/write) and device (type, major and minor numbers).
-If the program returns 0, the attempt fails with -EPERM, otherwise
-it succeeds.
-
-An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
+create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
+them to cgroups with BPF_CGROUP_DEVICE flag. On an attempt to access a
+device file, corresponding BPF programs will be executed, and depending
+on the return value the attempt will succeed or fail with -EPERM.
+
+A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
+bpf_cgroup_dev_ctx structure, which describes the device access attempt:
+access type (mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise it
+succeeds.
+
+An example of BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
+tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.
 
 
 RDMA
index 91ba391f9b328b46ecce8c3c187d8e8c9238f0fc..43dc35fe5bc038ee93bd32f0c7fe641038427bd1 100644 (file)
                        The VGA and EFI output is eventually overwritten by
                        the real console.
 
-                       The xen output can only be used by Xen PV guests.
+                       The xen option can only be used in Xen domains.
 
                        The sclp output can only be used on s390.
 
index 07b20383cbca0db82f22861df8ed5263063c9222..b446d0f0f1b4f1e29d9139522f8c81fc2e0e0dd1 100644 (file)
@@ -50,7 +50,6 @@ properties:
               data-lanes:
                 description: array of physical DSI data lane indexes.
                 minItems: 1
-                maxItems: 4
                 items:
                   - const: 1
                   - const: 2
@@ -71,7 +70,6 @@ properties:
               data-lanes:
                 description: array of physical DSI data lane indexes.
                 minItems: 1
-                maxItems: 4
                 items:
                   - const: 1
                   - const: 2
index 1c2daf7c24cc04173517c5a0e53c776dc9ff2dc0..911564468c5e0f86132b3600355d7b813893c827 100644 (file)
@@ -18,7 +18,7 @@ properties:
     const: ti,sn65dsi86
 
   reg:
-    const: 0x2d
+    enum: [ 0x2c, 0x2d ]
 
   enable-gpios:
     maxItems: 1
index 2ed010f91e2d6db40f0cdc3ba05d7573a9591530..20ce88ab4b3a4513334ac9e4daa68620890780e4 100644 (file)
@@ -22,7 +22,7 @@ properties:
     items:
       - enum:
           # ili9341 240*320 Color on stm32f429-disco board
-        - st,sf-tc240t-9370-t
+          - st,sf-tc240t-9370-t
       - const: ilitek,ili9341
 
   reg: true
index 29de7807df54ec4713d48d972e2e0035d8a416b0..bcd41e491f1d1269237884797d3b4fdcfce58e41 100644 (file)
@@ -31,11 +31,11 @@ properties:
 
   clocks:
     minItems: 1
-    maxItems: 3
+    maxItems: 7
 
   clock-names:
     minItems: 1
-    maxItems: 3
+    maxItems: 7
 
 required:
   - compatible
@@ -72,6 +72,32 @@ allOf:
           contains:
             enum:
               - qcom,sdm660-a2noc
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Bus Clock.
+            - description: Bus A Clock.
+            - description: IPA Clock.
+            - description: UFS AXI Clock.
+            - description: Aggregate2 UFS AXI Clock.
+            - description: Aggregate2 USB3 AXI Clock.
+            - description: Config NoC USB2 AXI Clock.
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+            - const: ipa
+            - const: ufs_axi
+            - const: aggre2_ufs_axi
+            - const: aggre2_usb3_axi
+            - const: cfg_noc_usb2_axi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
               - qcom,sdm660-bimc
               - qcom,sdm660-cnoc
               - qcom,sdm660-gnoc
@@ -91,6 +117,7 @@ examples:
   - |
       #include <dt-bindings/clock/qcom,rpmcc.h>
       #include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+      #include <dt-bindings/clock/qcom,gcc-sdm660.h>
 
       bimc: interconnect@1008000 {
               compatible = "qcom,sdm660-bimc";
@@ -123,9 +150,20 @@ examples:
               compatible = "qcom,sdm660-a2noc";
               reg = <0x01704000 0xc100>;
               #interconnect-cells = <1>;
-              clock-names = "bus", "bus_a";
+              clock-names = "bus",
+                            "bus_a",
+                            "ipa",
+                            "ufs_axi",
+                            "aggre2_ufs_axi",
+                            "aggre2_usb3_axi",
+                            "cfg_noc_usb2_axi";
               clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                       <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+                       <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+                       <&rpmcc RPM_SMD_IPA_CLK>,
+                       <&gcc GCC_UFS_AXI_CLK>,
+                       <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+                       <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+                       <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
       };
 
       mnoc: interconnect@1745000 {
index 3e5d82df90a2e1c263324f24b9a55757de9ae207..a2abed06a099b41c7b165c0650e3967ec43a4962 100644 (file)
@@ -31,7 +31,7 @@ properties:
     maxItems: 1
 
   port:
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
     additionalProperties: false
 
     properties:
index ad42992c6da39775c196a299559ff35373cf0cac..bf115ab9d926d7ace7ea0b5566fe997bf32db33c 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index 881f79532501f1449a8c61aae41baa93e9bc5f92..cf2ca2702cc9f69175201de1c57cf94f492be812 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index 1edeabf39e6a724c7d0ec8c0adfa22c0e249c294..afcf70947f7e621977b5e4b01eac6722e2c01e27 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index fc1317ab32264efd3f12a92fc007925d98149588..28ac60acf4acd36cc7127f52e3692bc70457eb4f 100644 (file)
@@ -32,13 +32,13 @@ properties:
   "#size-cells":
     const: 1
 
-  pinctrl:
-    $ref: ../pinctrl/brcm,ns-pinmux.yaml
-
 patternProperties:
   '^clock-controller@[a-f0-9]+$':
     $ref: ../clock/brcm,iproc-clocks.yaml
 
+  '^pin-controller@[a-f0-9]+$':
+    $ref: ../pinctrl/brcm,ns-pinmux.yaml
+
   '^thermal@[a-f0-9]+$':
     $ref: ../thermal/brcm,ns-thermal.yaml
 
@@ -73,9 +73,10 @@ examples:
                                  "iprocfast", "sata1", "sata2";
         };
 
-        pinctrl {
+        pin-controller@1c0 {
             compatible = "brcm,bcm4708-pinmux";
-            offset = <0x1c0>;
+            reg = <0x1c0 0x24>;
+            reg-names = "cru_gpio_control";
         };
 
         thermal@2c0 {
index e6c9a2f77cc752316f22add398e7b15f441d318e..f300ced4cdf3679302e33cb1ac1e2e9d51b1146e 100644 (file)
@@ -20,9 +20,7 @@ properties:
       - snps,dwcmshc-sdhci
 
   reg:
-    minItems: 1
-    items:
-      - description: Offset and length of the register set for the device
+    maxItems: 1
 
   interrupts:
     maxItems: 1
index 30c11fea491bda4855036f8b124f0c265d16682f..2363b412410c3714285fb0259c56c2e20816412a 100644 (file)
@@ -83,7 +83,7 @@ Example:
                #interrupt-cells = <2>;
 
                switch0: switch@0 {
-                       compatible = "marvell,mv88e6390";
+                       compatible = "marvell,mv88e6190";
                        reg = <0>;
                        reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
 
index 5629b2e4ccf89b36d212fcce47f7a7291af8e367..ee4afe361fac6d2325e99c2b0c614f3178435e1a 100644 (file)
@@ -34,7 +34,6 @@ properties:
 
   clocks:
     minItems: 3
-    maxItems: 5
     items:
       - description: MAC host clock
       - description: MAC apb clock
index 42689b7d03a2ff84c35d08891fc060e06e6100a0..c115c95ee584e6aa382fc70173b1428089e6ab8b 100644 (file)
@@ -21,6 +21,7 @@ select:
       contains:
         enum:
           - snps,dwmac
+          - snps,dwmac-3.40a
           - snps,dwmac-3.50a
           - snps,dwmac-3.610
           - snps,dwmac-3.70a
@@ -76,6 +77,7 @@ properties:
         - rockchip,rk3399-gmac
         - rockchip,rv1108-gmac
         - snps,dwmac
+        - snps,dwmac-3.40a
         - snps,dwmac-3.50a
         - snps,dwmac-3.610
         - snps,dwmac-3.70a
index 2911e565b26053bb2f00e55a6c475ef6f6003935..acea1cd444fd52490450405bceb5d063e955ab2c 100644 (file)
@@ -41,7 +41,6 @@ properties:
       - description: builtin MSI controller.
 
   interrupt-names:
-    minItems: 1
     items:
       - const: msi
 
index 470aff599c2702d4ca34a78bfb0b6cc70b1fab51..fc39e3e9f71ccd91c8bff3c5e783917c02d98719 100644 (file)
@@ -17,9 +17,6 @@ description:
 
   A list of pins varies across chipsets so few bindings are available.
 
-  Node of the pinmux must be nested in the CRU (Central Resource Unit) "syscon"
-  node.
-
 properties:
   compatible:
     enum:
@@ -27,10 +24,11 @@ properties:
       - brcm,bcm4709-pinmux
       - brcm,bcm53012-pinmux
 
-  offset:
-    description: offset of pin registers in the CRU block
+  reg:
     maxItems: 1
-    $ref: /schemas/types.yaml#/definitions/uint32-array
+
+  reg-names:
+    const: cru_gpio_control
 
 patternProperties:
   '-pins$':
@@ -72,23 +70,20 @@ allOf:
                         uart1_grp ]
 
 required:
-  - offset
+  - reg
+  - reg-names
 
 additionalProperties: false
 
 examples:
   - |
-    cru@1800c100 {
-        compatible = "syscon", "simple-mfd";
-        reg = <0x1800c100 0x1a4>;
-
-        pinctrl {
-            compatible = "brcm,bcm4708-pinmux";
-            offset = <0xc0>;
-
-            spi-pins {
-                function = "spi";
-                groups = "spi_grp";
-            };
+    pin-controller@1800c1c0 {
+        compatible = "brcm,bcm4708-pinmux";
+        reg = <0x1800c1c0 0x24>;
+        reg-names = "cru_gpio_control";
+
+        spi-pins {
+            function = "spi";
+            groups = "spi_grp";
         };
     };
index ffe9ea0c1499896b23ee021ee881b805c6294726..d67ccd22c63b1eed83be1fa1f509247fdd9bdbb4 100644 (file)
 NTFS3
 =====
 
-
 Summary and Features
 ====================
 
-NTFS3 is fully functional NTFS Read-Write driver. The driver works with
-NTFS versions up to 3.1, normal/compressed/sparse files
-and journal replaying. File system type to use on mount is 'ntfs3'.
+NTFS3 is fully functional NTFS Read-Write driver. The driver works with NTFS
+versions up to 3.1. File system type to use on mount is *ntfs3*.
 
 - This driver implements NTFS read/write support for normal, sparse and
   compressed files.
-- Supports native journal replaying;
-- Supports extended attributes
-       Predefined extended attributes:
-       - 'system.ntfs_security' gets/sets security
-                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
-       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
-               Note: applied to empty files, this allows to switch type between
-               sparse(0x200), compressed(0x800) and normal;
+- Supports native journal replaying.
 - Supports NFS export of mounted NTFS volumes.
+- Supports extended attributes. Predefined extended attributes:
+
+       - *system.ntfs_security* gets/sets security
+
+               Descriptor: SECURITY_DESCRIPTOR_RELATIVE
+
+       - *system.ntfs_attrib* gets/sets ntfs file/dir attributes.
+
+         Note: Applied to empty files, this allows switching the type between
+         sparse(0x200), compressed(0x800) and normal.
 
 Mount Options
 =============
 
 The list below describes mount options supported by NTFS3 driver in addition to
-generic ones.
+generic ones. You can use every mount option with **no** option. If it is in
+this table marked with no it means default is without **no**.
 
-===============================================================================
+.. flat-table::
+   :widths: 1 5
+   :fill-cells:
 
-nls=name               This option informs the driver how to interpret path
-                       strings and translate them to Unicode and back. If
-                       this option is not set, the default codepage will be
-                       used (CONFIG_NLS_DEFAULT).
-                       Examples:
-                               'nls=utf8'
+   * - iocharset=name
+     - This option informs the driver how to interpret path strings and
+       translate them to Unicode and back. If this option is not set, the
+       default codepage will be used (CONFIG_NLS_DEFAULT).
 
-uid=
-gid=
-umask=                 Controls the default permissions for files/directories created
-                       after the NTFS volume is mounted.
+       Example: iocharset=utf8
 
-fmask=
-dmask=                 Instead of specifying umask which applies both to
-                       files and directories, fmask applies only to files and
-                       dmask only to directories.
+   * - uid=
+     - :rspan:`1`
+   * - gid=
 
-nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
-                       attribute will not be shown under Linux.
+   * - umask=
+     - Controls the default permissions for files/directories created after
+       the NTFS volume is mounted.
 
-sys_immutable          Files with the Windows-specific SYSTEM
-                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
-                       immutable files.
+   * - dmask=
+     - :rspan:`1` Instead of specifying umask which applies both to files and
+       directories, fmask applies only to files and dmask only to directories.
+   * - fmask=
 
-discard                        Enable support of the TRIM command for improved performance
-                       on delete operations, which is recommended for use with the
-                       solid-state drives (SSD).
+   * - noacsrules
+     - "No access rules" mount option sets access rights for files/folders to
+       777 and owner/group to root. This mount option absorbs all other
+       permissions.
 
-force                  Forces the driver to mount partitions even if 'dirty' flag
-                       (volume dirty) is set. Not recommended for use.
+       - Permissions change for files/folders will be reported as successful,
+        but they will remain 777.
 
-sparse                 Create new files as "sparse".
+       - Owner/group change will be reported as successful, but they will stay
+        as root.
 
-showmeta               Use this parameter to show all meta-files (System Files) on
-                       a mounted NTFS partition.
-                       By default, all meta-files are hidden.
+   * - nohidden
+     - Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute
+       will not be shown under Linux.
 
-prealloc               Preallocate space for files excessively when file size is
-                       increasing on writes. Decreases fragmentation in case of
-                       parallel write operations to different files.
+   * - sys_immutable
+     - Files with the Windows-specific SYSTEM (FILE_ATTRIBUTE_SYSTEM) attribute
+       will be marked as system immutable files.
 
-no_acs_rules           "No access rules" mount option sets access rights for
-                       files/folders to 777 and owner/group to root. This mount
-                       option absorbs all other permissions:
-                       - permissions change for files/folders will be reported
-                               as successful, but they will remain 777;
-                       - owner/group change will be reported as successful, but
-                               they will stay as root
+   * - discard
+     - Enable support of the TRIM command for improved performance on delete
+       operations, which is recommended for use with the solid-state drives
+       (SSD).
 
-acl                    Support POSIX ACLs (Access Control Lists). Effective if
-                       supported by Kernel. Not to be confused with NTFS ACLs.
-                       The option specified as acl enables support for POSIX ACLs.
+   * - force
+     - Forces the driver to mount partitions even if volume is marked dirty.
+       Not recommended for use.
 
-noatime                        All files and directories will not update their last access
-                       time attribute if a partition is mounted with this parameter.
-                       This option can speed up file system operation.
+   * - sparse
+     - Create new files as sparse.
 
-===============================================================================
+   * - showmeta
+     - Use this parameter to show all meta-files (System Files) on a mounted
+       NTFS partition. By default, all meta-files are hidden.
 
-ToDo list
-=========
+   * - prealloc
+     - Preallocate space for files excessively when file size is increasing on
+       writes. Decreases fragmentation in case of parallel write operations to
+       different files.
 
-- Full journaling support (currently journal replaying is supported) over JBD.
+   * - acl
+     - Support POSIX ACLs (Access Control Lists). Effective if supported by
+       Kernel. Not to be confused with NTFS ACLs. The option specified as acl
+       enables support for POSIX ACLs.
 
+Todo list
+=========
+- Full journaling support over JBD. Currently journal replaying is supported
+  which is not necessarily as effective as JBD would be.
 
 References
 ==========
-https://www.paragon-software.com/home/ntfs-linux-professional/
-       - Commercial version of the NTFS driver for Linux.
+- Commercial version of the NTFS driver for Linux.
+       https://www.paragon-software.com/home/ntfs-linux-professional/
 
-almaz.alexandrovich@paragon-software.com
-       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
+- Direct e-mail address for feedback and requests on the NTFS3 implementation.
+       almaz.alexandrovich@paragon-software.com
index 364680cdad2e49eff419f67d4ddc7ce2b4a958e7..8ba72e898099ea3cba7db5d6756bbc3a85cb7948 100644 (file)
@@ -300,8 +300,8 @@ pcie_replay_count
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    :doc: pcie_replay_count
 
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
 
 GPU SmartShift information via sysfs
 
index 06af044c882f41de769884a4c6d271d391022e12..607f78f0f189a3f8ec6d0ff86c20bd3c27463c0c 100644 (file)
@@ -111,15 +111,6 @@ Component Helper Usage
 .. kernel-doc:: drivers/gpu/drm/drm_drv.c
    :doc: component helper usage recommendations
 
-IRQ Helper Library
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :doc: irq helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :export:
-
 Memory Manager Initialization
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
index 8557e26281c3a9c8499a095cd0f1263478166037..91b99adc6c487fdfe78390eef8a9cfd66edbb036 100644 (file)
@@ -132,20 +132,3 @@ On Family 17h and Family 18h CPUs, additional temperature sensors may report
 Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported
 as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU
 variant.
-
-Various Family 17h and 18h CPUs report voltage and current telemetry
-information. The following attributes may be reported.
-
-Attribute      Label   Description
-===============        ======= ================
-in0_input      Vcore   Core voltage
-in1_input      Vsoc    SoC voltage
-curr1_input    Icore   Core current
-curr2_input    Isoc    SoC current
-===============        ======= ================
-
-Current values are raw (unscaled) as reported by the CPU. Core current is
-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A
-/ LSB. The real current is board specific. Reported currents should be seen
-as rough guidance, and should be scaled using sensors3.conf as appropriate
-for a given board.
index a432dc419fa4031d2b7c82961d6708dd552f688c..5d97cee9457be58c0ace8097347d3a97c04c69bc 100644 (file)
@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
         PHY, link, etc.
     * - ``fw.mgmt.api``
       - running
-      - 1.5
-      - 2-digit version number of the API exported over the AdminQ by the
-        management firmware. Used by the driver to identify what commands
-        are supported.
+      - 1.5.1
+      - 3-digit version number (major.minor.patch) of the API exported over
+        the AdminQ by the management firmware. Used by the driver to
+        identify what commands are supported. Historical versions of the
+        kernel only displayed a 2-digit version number (major.minor).
     * - ``fw.mgmt.build``
       - running
       - 0x305d955f
index 6100cdc220f6b9f1bd9de27729e36c7584942f15..fa7730dbf7b9144793f4ba19c1f1d1fbee623fe9 100644 (file)
@@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
     };
 
     struct sockaddr_mctp {
-            unsigned short int smctp_family;
-            int                        smctp_network;
-            struct mctp_addr   smctp_addr;
-            __u8               smctp_type;
-            __u8               smctp_tag;
+            __kernel_sa_family_t smctp_family;
+            unsigned int         smctp_network;
+            struct mctp_addr     smctp_addr;
+            __u8                 smctp_type;
+            __u8                 smctp_tag;
     };
 
     #define MCTP_NET_ANY       0x0
index 42ef59ea531436eeaea5ee1c2a7ef2ee8c93967d..bdb880e01132595e179253faa8401371449441ac 100644 (file)
@@ -18,7 +18,7 @@ types can be added after the security issue of corresponding device driver
 is clarified or fixed in the future.
 
 Create/Destroy VDUSE devices
-------------------------
+----------------------------
 
 VDUSE devices are created as follows:
 
index 5b33791bb8e97ade63e9dc64e8e69316c8c7c1f0..80eebc1d9ed52f230b814afe0d2dc95314876c61 100644 (file)
@@ -414,7 +414,8 @@ T:  git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 F:     drivers/acpi/pmic/
 
 ACPI THERMAL DRIVER
-M:     Zhang Rui <rui.zhang@intel.com>
+M:     Rafael J. Wysocki <rafael@kernel.org>
+R:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
 S:     Supported
 W:     https://01.org/linux-acpi
@@ -810,7 +811,7 @@ F:  Documentation/devicetree/bindings/dma/altr,msgdma.yaml
 F:     drivers/dma/altera-msgdma.c
 
 ALTERA PIO DRIVER
-M:     Joyce Ooi <joyce.ooi@intel.com>
+M:     Mun Yew Tham <mun.yew.tham@intel.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-altera.c
@@ -1275,6 +1276,7 @@ F:        drivers/input/mouse/bcm5974.c
 
 APPLE DART IOMMU DRIVER
 M:     Sven Peter <sven@svenpeter.dev>
+R:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:     iommu@lists.linux-foundation.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/iommu/apple,dart.yaml
@@ -1711,6 +1713,8 @@ F:        drivers/*/*alpine*
 
 ARM/APPLE MACHINE SUPPORT
 M:     Hector Martin <marcan@marcan.st>
+M:     Sven Peter <sven@svenpeter.dev>
+R:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     https://asahilinux.org
@@ -2236,6 +2240,7 @@ F:        arch/arm/mach-pxa/mioa701.c
 
 ARM/MStar/Sigmastar Armv7 SoC support
 M:     Daniel Palmer <daniel@thingy.jp>
+M:     Romain Perier <romain.perier@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     http://linux-chenxing.org/
@@ -2712,6 +2717,7 @@ F:        drivers/power/reset/keystone-reset.c
 
 ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
 M:     Nishanth Menon <nm@ti.com>
+M:     Vignesh Raghavendra <vigneshr@ti.com>
 M:     Tero Kristo <kristo@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
@@ -2961,7 +2967,7 @@ F:        crypto/async_tx/
 F:     include/linux/async_tx.h
 
 AT24 EEPROM DRIVER
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -3384,9 +3390,11 @@ F:       Documentation/networking/filter.rst
 F:     Documentation/userspace-api/ebpf/
 F:     arch/*/net/*
 F:     include/linux/bpf*
+F:     include/linux/btf*
 F:     include/linux/filter.h
 F:     include/trace/events/xdp.h
 F:     include/uapi/linux/bpf*
+F:     include/uapi/linux/btf*
 F:     include/uapi/linux/filter.h
 F:     kernel/bpf/
 F:     kernel/trace/bpf_trace.c
@@ -3820,7 +3828,6 @@ F:        drivers/scsi/mpi3mr/
 
 BROADCOM NETXTREME-E ROCE DRIVER
 M:     Selvin Xavier <selvin.xavier@broadcom.com>
-M:     Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.broadcom.com
@@ -4655,7 +4662,7 @@ W:        http://linux-cifs.samba.org/
 T:     git git://git.samba.org/sfrench/cifs-2.6.git
 F:     Documentation/admin-guide/cifs/
 F:     fs/cifs/
-F:     fs/cifs_common/
+F:     fs/smbfs_common/
 
 COMPACTPCI HOTPLUG CORE
 M:     Scott Murray <scott@spiteful.org>
@@ -5451,6 +5458,19 @@ F:       include/net/devlink.h
 F:     include/uapi/linux/devlink.h
 F:     net/core/devlink.c
 
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+M:     Christoph Niedermaier <cniedermaier@dh-electronics.com>
+L:     kernel@dh-electronics.com
+S:     Maintained
+F:     arch/arm/boot/dts/imx6*-dhcom-*
+
+DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+M:     Marek Vasut <marex@denx.de>
+L:     kernel@dh-electronics.com
+S:     Maintained
+F:     arch/arm/boot/dts/stm32mp1*-dhcom-*
+F:     arch/arm/boot/dts/stm32mp1*-dhcor-*
+
 DIALOG SEMICONDUCTOR DRIVERS
 M:     Support Opensource <support.opensource@diasemi.com>
 S:     Supported
@@ -7336,10 +7356,11 @@ F:      include/uapi/linux/fpga-dfl.h
 
 FPGA MANAGER FRAMEWORK
 M:     Moritz Fischer <mdf@kernel.org>
+M:     Wu Hao <hao.wu@intel.com>
+M:     Xu Yilun <yilun.xu@intel.com>
 R:     Tom Rix <trix@redhat.com>
 L:     linux-fpga@vger.kernel.org
 S:     Maintained
-W:     http://www.rocketboards.org
 Q:     http://patchwork.kernel.org/project/linux-fpga/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
 F:     Documentation/devicetree/bindings/fpga/
@@ -7433,7 +7454,7 @@ FREESCALE IMX / MXC FEC DRIVER
 M:     Joakim Zhang <qiangqing.zhang@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     Documentation/devicetree/bindings/net/fsl-fec.txt
+F:     Documentation/devicetree/bindings/net/fsl,fec.yaml
 F:     drivers/net/ethernet/freescale/fec.h
 F:     drivers/net/ethernet/freescale/fec_main.c
 F:     drivers/net/ethernet/freescale/fec_ptp.c
@@ -7985,7 +8006,7 @@ F:        include/linux/gpio/regmap.h
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ -8607,9 +8628,8 @@ F:        Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml
 F:     drivers/iio/humidity/hts221*
 
 HUAWEI ETHERNET DRIVER
-M:     Bin Luo <luobin9@huawei.com>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Orphan
 F:     Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
 F:     drivers/net/ethernet/huawei/hinic/
 
@@ -9301,7 +9321,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel/atomisp2/led.c
 
 INTEL BIOS SAR INT1092 DRIVER
-M:     Shravan S <s.shravan@intel.com>
+M:     Shravan Sudhakar <s.shravan@intel.com>
 M:     Intel Corporation <linuxwwan@intel.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
@@ -9623,7 +9643,7 @@ F:        include/uapi/linux/isst_if.h
 F:     tools/power/x86/intel-speed-select/
 
 INTEL STRATIX10 FIRMWARE DRIVERS
-M:     Richard Gong <richard.gong@linux.intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
@@ -10193,8 +10213,8 @@ M:      Hyunchul Lee <hyc.lee@gmail.com>
 L:     linux-cifs@vger.kernel.org
 S:     Maintained
 T:     git git://git.samba.org/ksmbd.git
-F:     fs/cifs_common/
 F:     fs/ksmbd/
+F:     fs/smbfs_common/
 
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
@@ -10273,7 +10293,6 @@ KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
 M:     Janosch Frank <frankja@linux.ibm.com>
 R:     David Hildenbrand <david@redhat.com>
-R:     Cornelia Huck <cohuck@redhat.com>
 R:     Claudio Imbrenda <imbrenda@linux.ibm.com>
 L:     kvm@vger.kernel.org
 S:     Supported
@@ -11147,6 +11166,7 @@ S:      Maintained
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 F:     Documentation/networking/devlink/mv88e6xxx.rst
 F:     drivers/net/dsa/mv88e6xxx/
+F:     include/linux/dsa/mv88e6xxx.h
 F:     include/linux/platform_data/mv88e6xxx.h
 
 MARVELL ARMADA 3700 PHY DRIVERS
@@ -11366,7 +11386,7 @@ F:      Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
 F:     drivers/iio/proximity/mb1232.c
 
 MAXIM MAX77650 PMIC MFD DRIVER
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/*/*max77650.yaml
@@ -16295,6 +16315,7 @@ S390
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
+R:     Alexander Gordeev <agordeev@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -16373,7 +16394,6 @@ F:      drivers/s390/crypto/vfio_ap_ops.c
 F:     drivers/s390/crypto/vfio_ap_private.h
 
 S390 VFIO-CCW DRIVER
-M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
@@ -17792,7 +17812,6 @@ F:      drivers/staging/nvec/
 
 STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 M:     Jens Frederich <jfrederich@gmail.com>
-M:     Daniel Drake <dsd@laptop.org>
 M:     Jon Nettleton <jon.nettleton@gmail.com>
 S:     Maintained
 W:     http://wiki.laptop.org/go/DCON
@@ -17883,7 +17902,8 @@ M:      Olivier Moysan <olivier.moysan@foss.st.com>
 M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F:     Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F:     Documentation/devicetree/bindings/sound/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -17980,7 +18000,7 @@ F:      net/switchdev/
 SY8106A REGULATOR DRIVER
 M:     Icenowy Zheng <icenowy@aosc.io>
 S:     Maintained
-F:     Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
+F:     Documentation/devicetree/bindings/regulator/silergy,sy8106a.yaml
 F:     drivers/regulator/sy8106a-regulator.c
 
 SYNC FILE FRAMEWORK
@@ -18547,13 +18567,14 @@ T:    git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-raremono.c
 
 THERMAL
-M:     Zhang Rui <rui.zhang@intel.com>
+M:     Rafael J. Wysocki <rafael@kernel.org>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 R:     Amit Kucheria <amitk@kernel.org>
+R:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-pm/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
 F:     Documentation/devicetree/bindings/thermal/
 F:     drivers/thermal/
 F:     include/linux/cpu_cooling.h
@@ -18682,7 +18703,7 @@ F:      include/linux/clk/ti.h
 
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
-R:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+R:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ -20328,6 +20349,7 @@ X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:     Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Borislav Petkov <bp@alien8.de>
+M:     Dave Hansen <dave.hansen@linux.intel.com>
 M:     x86@kernel.org
 R:     "H. Peter Anvin" <hpa@zytor.com>
 L:     linux-kernel@vger.kernel.org
@@ -20696,7 +20718,6 @@ S:      Maintained
 F:     mm/zbud.c
 
 ZD1211RW WIRELESS DRIVER
-M:     Daniel Drake <dsd@gentoo.org>
 M:     Ulrich Kunitz <kune@deine-taler.de>
 L:     linux-wireless@vger.kernel.org
 L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
index 437ccc66a1c281e698a07c57c6d4d033c8209bbc..30c7c81d0437a46ba06368eb0dbe5624dc870757 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
index 9320b04c04bf55ba13ba85c3b14a48e0244531af..4cf45a99fd792a5e1b528c96a610cfdd9120f14e 100644 (file)
@@ -26,11 +26,6 @@ extern char empty_zero_page[PAGE_SIZE];
 
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 
-/* Macro to mark a page protection as uncacheable */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
-
-extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
-
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
index fc196421b2cedb5ecf92eaeff32bcd907a2399d8..dcf2df6da98f075e01d5a7105aa84003a64867af 100644 (file)
@@ -92,6 +92,7 @@ config ARM
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_IRQ_TIME_ACCOUNTING
@@ -1989,8 +1990,6 @@ config ARCH_HIBERNATION_POSSIBLE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 if CRYPTO
 source "arch/arm/crypto/Kconfig"
 endif
index aa075d8372ea20bd030d43663b61acba3e758d4e..74255e819831424216778c92fab3a1a92d6b3ab4 100644 (file)
@@ -47,7 +47,10 @@ extern char * strchrnul(const char *, int);
 #endif
 
 #ifdef CONFIG_KERNEL_XZ
+/* Prevent KASAN override of string helpers in decompressor */
+#undef memmove
 #define memmove memmove
+#undef memcpy
 #define memcpy memcpy
 #include "../../../../lib/decompress_unxz.c"
 #endif
index 614999dcb990358eda699dacbeaf9630affbd8da..cd4672501add7d91b1bd930f6b78840d3187cd01 100644 (file)
@@ -71,7 +71,6 @@
                        isc: isc@f0008000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
-                               status = "okay";
                        };
 
                        qspi1: spi@f0024000 {
index 4cbed98cc2f43ce91065f626db64c1654ff4ebda..f3d6aaa3a78dc54e0fb706c36f98613a8cf86483 100644 (file)
 
                                        regulator-state-standby {
                                                regulator-on-in-suspend;
+                                               regulator-suspend-microvolt = <1350000>;
                                                regulator-mode = <4>;
                                        };
 
                                        regulator-state-mem {
                                                regulator-on-in-suspend;
+                                               regulator-suspend-microvolt = <1350000>;
                                                regulator-mode = <4>;
                                        };
                                };
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_gmac0_default &pinctrl_gmac0_txck_default &pinctrl_gmac0_phy_irq>;
+       pinctrl-0 = <&pinctrl_gmac0_default
+                    &pinctrl_gmac0_mdio_default
+                    &pinctrl_gmac0_txck_default
+                    &pinctrl_gmac0_phy_irq>;
        phy-mode = "rgmii-id";
        status = "okay";
 
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_gmac1_default &pinctrl_gmac1_phy_irq>;
+       pinctrl-0 = <&pinctrl_gmac1_default
+                    &pinctrl_gmac1_mdio_default
+                    &pinctrl_gmac1_phy_irq>;
        phy-mode = "rmii";
        status = "okay";
 
                         <PIN_PA15__G0_TXEN>,
                         <PIN_PA30__G0_RXCK>,
                         <PIN_PA18__G0_RXDV>,
-                        <PIN_PA22__G0_MDC>,
-                        <PIN_PA23__G0_MDIO>,
                         <PIN_PA25__G0_125CK>;
+               slew-rate = <0>;
+               bias-disable;
+       };
+
+       pinctrl_gmac0_mdio_default: gmac0_mdio_default {
+               pinmux = <PIN_PA22__G0_MDC>,
+                        <PIN_PA23__G0_MDIO>;
                bias-disable;
        };
 
        pinctrl_gmac0_txck_default: gmac0_txck_default {
                pinmux = <PIN_PA24__G0_TXCK>;
+               slew-rate = <0>;
                bias-pull-up;
        };
 
                         <PIN_PD25__G1_RX0>,
                         <PIN_PD26__G1_RX1>,
                         <PIN_PD27__G1_RXER>,
-                        <PIN_PD24__G1_RXDV>,
-                        <PIN_PD28__G1_MDC>,
+                        <PIN_PD24__G1_RXDV>;
+               slew-rate = <0>;
+               bias-disable;
+       };
+
+       pinctrl_gmac1_mdio_default: gmac1_mdio_default {
+               pinmux = <PIN_PD28__G1_MDC>,
                         <PIN_PD29__G1_MDIO>;
                bias-disable;
        };
                                 <PIN_PA8__SDMMC0_DAT5>,
                                 <PIN_PA9__SDMMC0_DAT6>,
                                 <PIN_PA10__SDMMC0_DAT7>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                        pinmux = <PIN_PA0__SDMMC0_CK>,
                                 <PIN_PA2__SDMMC0_RSTN>,
                                 <PIN_PA11__SDMMC0_DS>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
                                 <PIN_PC0__SDMMC1_DAT1>,
                                 <PIN_PC1__SDMMC1_DAT2>,
                                 <PIN_PC2__SDMMC1_DAT3>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                                 <PIN_PB28__SDMMC1_RSTN>,
                                 <PIN_PC5__SDMMC1_1V8SEL>,
                                 <PIN_PC4__SDMMC1_CD>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
                                 <PIN_PD6__SDMMC2_DAT1>,
                                 <PIN_PD7__SDMMC2_DAT2>,
                                 <PIN_PD8__SDMMC2_DAT3>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                ck {
                        pinmux = <PIN_PD4__SDMMC2_CK>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
        pinctrl-0 = <&pinctrl_sdmmc2_default>;
 };
 
+&shdwc {
+       atmel,shdwc-debouncer = <976>;
+       status = "okay";
+
+       input@0 {
+               reg = <0>;
+       };
+};
+
 &spdifrx {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_spdifrx_default>;
index f24bdd0870a521bb0412805f8fa0d1a7e3aa474c..72ce80fbf26626f45729b257914f2baab2078217 100644 (file)
@@ -40,8 +40,8 @@
                regulator-always-on;
                regulator-settling-time-us = <5000>;
                gpios = <&expgpio 4 GPIO_ACTIVE_HIGH>;
-               states = <1800000 0x1
-                         3300000 0x0>;
+               states = <1800000 0x1>,
+                        <3300000 0x0>;
                status = "okay";
        };
 
 };
 
 &pcie0 {
-       pci@1,0 {
+       pci@0,0 {
+               device_type = "pci";
                #address-cells = <3>;
                #size-cells = <2>;
                ranges;
 
                reg = <0 0 0 0 0>;
 
-               usb@1,0 {
-                       reg = <0x10000 0 0 0 0>;
+               usb@0,0 {
+                       reg = <0 0 0 0 0>;
                        resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>;
                };
        };
index b8a4096192aa92fb801ea28308dcb89e9b75d141..3b60297af7f60b09f4c625e121f7e2d938eb9b58 100644 (file)
                        status = "disabled";
                };
 
+               vec: vec@7ec13000 {
+                       compatible = "brcm,bcm2711-vec";
+                       reg = <0x7ec13000 0x1000>;
+                       clocks = <&clocks BCM2835_CLOCK_VEC>;
+                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
                dvp: clock@7ef00000 {
                        compatible = "brcm,brcm2711-dvp";
                        reg = <0x7ef00000 0x10>;
                                compatible = "brcm,genet-mdio-v5";
                                reg = <0xe14 0x8>;
                                reg-names = "mdio";
-                               #address-cells = <0x0>;
-                               #size-cells = <0x1>;
+                               #address-cells = <0x1>;
+                               #size-cells = <0x0>;
                        };
                };
        };
index 4119271c979d6f1b80e32e0ace0a4f76a3f4393d..c25e797b90600a26a5ed510084bef57e65ee80ae 100644 (file)
                        status = "okay";
                };
 
+               vec: vec@7e806000 {
+                       compatible = "brcm,bcm2835-vec";
+                       reg = <0x7e806000 0x1000>;
+                       clocks = <&clocks BCM2835_CLOCK_VEC>;
+                       interrupts = <2 27>;
+                       status = "disabled";
+               };
+
                pixelvalve@7e807000 {
                        compatible = "brcm,bcm2835-pixelvalve2";
                        reg = <0x7e807000 0x100>;
index 0f3be55201a5bea78b2d75b4455ec7a3e6c5ebfc..a3e06b6809476c3431e0a85ae9781d6bc37c600f 100644 (file)
                        status = "disabled";
                };
 
-               vec: vec@7e806000 {
-                       compatible = "brcm,bcm2835-vec";
-                       reg = <0x7e806000 0x1000>;
-                       clocks = <&clocks BCM2835_CLOCK_VEC>;
-                       interrupts = <2 27>;
-                       status = "disabled";
-               };
-
                usb: usb@7e980000 {
                        compatible = "brcm,bcm2835-usb";
                        reg = <0x7e980000 0x10000>;
index d3082b9774e4094f4c9afe99dff9704cc8969d07..4f88e96d81ddbdd42078409d0a94e43f3e194566 100644 (file)
@@ -56,6 +56,7 @@
        panel {
                compatible = "edt,etm0700g0dh6";
                pinctrl-0 = <&pinctrl_display_gpio>;
+               pinctrl-names = "default";
                enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
 
                port {
@@ -76,8 +77,7 @@
                regulator-name = "vbus";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
-               enable-active-high;
+               gpio = <&gpio1 2 0>;
        };
 };
 
index cb8b539eb29d107e70d0df38d830877ec15103df..e5c4dc65fbabf518ee273f610cc93abafe3a18e4 100644 (file)
@@ -5,6 +5,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
 #include <dt-bindings/pwm/pwm.h>
 
 / {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <0>;
+                       color = <LED_COLOR_ID_RED>;
                };
 
                chan@1 {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <1>;
+                       color = <LED_COLOR_ID_GREEN>;
                };
 
                chan@2 {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <2>;
+                       color = <LED_COLOR_ID_BLUE>;
                };
 
                chan@3 {
                        led-cur = /bits/ 8 <0x0>;
                        max-cur = /bits/ 8 <0x0>;
                        reg = <3>;
+                       color = <LED_COLOR_ID_WHITE>;
                };
        };
 
index 5de4ccb979163dd7bc54b26658a3f69571382559..f7a56d6b160c8f75c5412be21449f5db58d84df7 100644 (file)
        pinctrl-0 = <&pinctrl_enet>;
        phy-mode = "rgmii-id";
        phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
+       phy-handle = <&phy>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy: ethernet-phy@1 {
+                       reg = <1>;
+                       qca,clk-out-frequency = <125000000>;
+               };
+       };
 };
 
 &hdmi {
index 5a63ca6157229ccc48d175f8045ef9446b203720..99f4cf777a384b0286f3ad177534e4428a5aa37d 100644 (file)
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <0>;
        };
 
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <2>;
        };
 };
index 779cc536566d6156394c4dd5b2ae2578286bae6f..a3fde3316c7360002e324cfd7d013578819403f4 100644 (file)
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <0>;
        };
 };
index c5b903718414992c105b2895b24e9c44476ab9fc..7d530ae3483b806e2c125facb086eabab6fff3a7 100644 (file)
 
        nand@1,0 {
                compatible = "ti,omap2-nand";
-               reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+               reg = <1 0 4>; /* CS1, offset 0, IO size 4 */
                interrupt-parent = <&gpmc>;
                interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
                             <1 IRQ_TYPE_NONE>; /* termcount */
index 0b2bed6e7adfd10febce5bc2441d040d9969c692..d1c1c6aab2b8781b7f047d93293cb9fb2f7c9cb1 100644 (file)
                        clock-frequency = <19200000>;
                };
 
-               pxo_board {
+               pxo_board: pxo_board {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
                        clock-frequency = <27000000>;
                };
 
                gpu: adreno-3xx@4300000 {
-                       compatible = "qcom,adreno-3xx";
+                       compatible = "qcom,adreno-320.2", "qcom,adreno";
                        reg = <0x04300000 0x20000>;
                        reg-names = "kgsl_3d0_reg_memory";
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "kgsl_3d0_irq";
                        clock-names =
-                           "core_clk",
-                           "iface_clk",
-                           "mem_clk",
-                           "mem_iface_clk";
+                           "core",
+                           "iface",
+                           "mem",
+                           "mem_iface";
                        clocks =
                            <&mmcc GFX3D_CLK>,
                            <&mmcc GFX3D_AHB_CLK>,
                            <&mmcc GFX3D_AXI_CLK>,
                            <&mmcc MMSS_IMEM_AHB_CLK>;
-                       qcom,chipid = <0x03020002>;
 
                        iommus = <&gfx3d 0
                                  &gfx3d 1
                        reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
                        clock-names = "iface_clk", "ref";
                        clocks = <&mmcc DSI_M_AHB_CLK>,
-                                <&cxo_board>;
+                                <&pxo_board>;
                };
 
 
index cc6be6db7b80b9c798927724d2927ad0a7baaedc..6c58c151c6d9e12bf50ca826f418433496cd30d9 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               securam: securam@e0000000 {
+                       compatible = "microchip,sama7g5-securam", "atmel,sama5d2-securam", "mmio-sram";
+                       reg = <0xe0000000 0x4000>;
+                       clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0 0xe0000000 0x4000>;
+                       no-memory-wc;
+                       status = "okay";
+               };
+
                secumod: secumod@e0004000 {
                        compatible = "microchip,sama7g5-secumod", "atmel,sama5d2-secumod", "syscon";
                        reg = <0xe0004000 0x4000>;
                        clock-names = "td_slck", "md_slck", "main_xtal";
                };
 
+               shdwc: shdwc@e001d010 {
+                       compatible = "microchip,sama7g5-shdwc", "syscon";
+                       reg = <0xe001d010 0x10>;
+                       clocks = <&clk32k 0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       atmel,wakeup-rtc-timer;
+                       atmel,wakeup-rtt-timer;
+                       status = "disabled";
+               };
+
                rtt: rtt@e001d020 {
                        compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
                        reg = <0xe001d020 0x30>;
                        clocks = <&clk32k 0>;
                };
 
+               chipid@e0020000 {
+                       compatible = "microchip,sama7g5-chipid";
+                       reg = <0xe0020000 0x8>;
+               };
+
                sdmmc0: mmc@e1204000 {
                        compatible = "microchip,sama7g5-sdhci", "microchip,sam9x60-sdhci";
                        reg = <0xe1204000 0x4000>;
                        };
                };
 
+               uddrc: uddrc@e3800000 {
+                       compatible = "microchip,sama7g5-uddrc";
+                       reg = <0xe3800000 0x4000>;
+                       status = "okay";
+               };
+
+               ddr3phy: ddr3phy@e3804000 {
+                       compatible = "microchip,sama7g5-ddr3phy";
+                       reg = <0xe3804000 0x1000>;
+                       status = "okay";
+               };
+
                gic: interrupt-controller@e8c11000 {
                        compatible = "arm,cortex-a7-gic";
                        #interrupt-cells = <3>;
index f266b7b034823a1063f4107fc6c0eda9e3e29767..cc88ebe7a60ced1725da1f3939f2fa0e462ef918 100644 (file)
@@ -47,7 +47,7 @@
                };
 
                gmac: eth@e0800000 {
-                       compatible = "st,spear600-gmac";
+                       compatible = "snps,dwmac-3.40a";
                        reg = <0xe0800000 0x8000>;
                        interrupts = <23 22>;
                        interrupt-names = "macirq", "eth_wake_irq";
index 8077f1716fbc81440d4e39fbcf99875f67d51453..ecb91fb899ff3db1d1bb0bc54da12ce069be7586 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&gmac_rgmii_pins>;
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index 2ad9fd7c94ecce45e437b3d96fc2fc6a8f72bb45..8af4b77fe655db89a1f929fab8be20bb8c6709e1 100644 (file)
@@ -17,6 +17,7 @@
  * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
  * CHANGES TO vexpress-v2m.dtsi!
  */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
        v2m_fixed_3v3: fixed-regulator-0 {
        };
 
        bus@8000000 {
-               motherboard-bus {
-                       model = "V2M-P1";
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 63>;
+               interrupt-map = <0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+               motherboard-bus@8000000 {
                        arm,hbi = <0x190>;
                        arm,vexpress,site = <0>;
-                       arm,v2m-memory-map = "rs1";
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0x08000000 0x04000000>,
+                                <1 0 0x14000000 0x04000000>,
+                                <2 0 0x18000000 0x04000000>,
+                                <3 0 0x1c000000 0x04000000>,
+                                <4 0 0x0c000000 0x04000000>,
+                                <5 0 0x10000000 0x04000000>;
 
                        nor_flash: flash@0 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
                                        clock-names = "apb_pclk";
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <9>, <10>;
                                        clock-names = "uartclk", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x1000>;
                                        interrupts = <0>;
index ec13ceb9ed362263fb7bd4428f75616d75f25384..f434fe5cf4a14ffac109fd48d230070a41049e11 100644 (file)
  * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
  * CHANGES TO vexpress-v2m-rs1.dtsi!
  */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
-       bus@4000000 {
-               motherboard {
-                       model = "V2M-P1";
+       bus@40000000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x40000000 0x40000000 0x10000000>,
+                        <0x10000000 0x10000000 0x00020000>;
+
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 63>;
+               interrupt-map = <0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+               motherboard-bus@40000000 {
                        arm,hbi = <0x190>;
                        arm,vexpress,site = <0>;
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0x40000000 0x04000000>,
+                                <1 0 0x44000000 0x04000000>,
+                                <2 0 0x48000000 0x04000000>,
+                                <3 0 0x4c000000 0x04000000>,
+                                <7 0 0x10000000 0x00020000>;
 
                        flash@0,00000000 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
index e63c5c0bfb43f03683c5a01048f563719f96073e..679537e17ff5c7119ffb62ad158e2400090685c7 100644 (file)
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 012d40a7228c14293588d99fc8e6466ab7542ee4..511e87cc2bc5e0aa3b8bb9b566e56c997ffc816c 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 7aa64ae2577981469c29b61a9a11eb18587020c7..3b88209bacea2f8664dcc4653f6bbca2397de4cb 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0x08000000 0x04000000>,
-                        <1 0 0x14000000 0x04000000>,
-                        <2 0 0x18000000 0x04000000>,
-                        <3 0 0x1c000000 0x04000000>,
-                        <4 0 0x0c000000 0x04000000>,
-                        <5 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 4c58479558562a5fa347bbe46bbb6fee280e8f03..5916e4877eace701c1affb59ea6d446a43079679 100644 (file)
                };
        };
 
-       smb: bus@4000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0x40000000 0x04000000>,
-                        <1 0 0x44000000 0x04000000>,
-                        <2 0 0x48000000 0x04000000>,
-                        <3 0 0x4c000000 0x04000000>,
-                        <7 0 0x10000000 0x00020000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
-       };
-
        site2: hsb@e0000000 {
                compatible = "simple-bus";
                #address-cells = <1>;
index efeb5724d9e9a8755f708bc07f085181c6811447..6237ede2f0c70010c6bc5fd9c3688365a602a787 100644 (file)
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-       memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
+       struct sharpsl_param_info *params = param_start(PARAM_BASE);
+
+       memcpy(&sharpsl_param, params, sizeof(*params));
 
        if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
                sharpsl_param.comadj=-1;
index d2d5f1cf815f207f30aa78d2511a0cb568f2a6c6..e6ff844821cfb8084a2f320182b22061476bb254 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ILITEK_IL9322=y
 CONFIG_DRM_TVE200=y
+CONFIG_FB=y
 CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_MON=y
index ccee86d0045dd60fbd1fbde612e8b0d53f8f1ac6..5e4128dadd8d54b21d841b9d2d0920abeed58b76 100644 (file)
@@ -292,6 +292,7 @@ CONFIG_DRM_IMX_LDB=y
 CONFIG_DRM_IMX_HDMI=y
 CONFIG_DRM_ETNAVIV=y
 CONFIG_DRM_MXSFB=y
+CONFIG_FB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_L4F00242T03=y
index ba67c4717dccec3f0b2748dd1b885dcf4844d293..33572998dbbe08f6857dc4efeebfaac6244e41b1 100644 (file)
@@ -197,7 +197,6 @@ CONFIG_PCI_EPF_TEST=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_OMAP_OCP2SCP=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -456,6 +455,7 @@ CONFIG_PINCTRL_STMFX=y
 CONFIG_PINCTRL_PALMAS=y
 CONFIG_PINCTRL_OWL=y
 CONFIG_PINCTRL_S500=y
+CONFIG_PINCTRL_MSM=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -725,6 +725,7 @@ CONFIG_DRM_PL111=m
 CONFIG_DRM_LIMA=m
 CONFIG_DRM_PANFROST=m
 CONFIG_DRM_ASPEED_GFX=m
+CONFIG_FB=y
 CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
@@ -1122,6 +1123,7 @@ CONFIG_PHY_DM816X_USB=m
 CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_TWL4030_USB=m
+CONFIG_RAS=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_ROCKCHIP_EFUSE=m
 CONFIG_NVMEM_SUNXI_SID=y
index cae0db6b4eaf30e2cf943a2678f2b66a442f2d4a..de37f7e9099910b952073904147df3f557e7a3e7 100644 (file)
@@ -46,7 +46,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
index d9a27e4e09140b43e906528703cf686866b83104..18d2a960b2d21a04c3c5213e828dd34ee674dde0 100644 (file)
@@ -40,7 +40,6 @@ CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
index 084d1c07c2d001043ad3504a15ea82300a7b5f1a..36fbc332925263a6c72277ea19a5864bf78d2a88 100644 (file)
@@ -176,6 +176,7 @@ extern int __get_user_64t_4(void *);
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                unsigned int __ua_flags = uaccess_save_and_enable();    \
+               int __tmp_e;                                            \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        if (sizeof((x)) >= 8)                           \
@@ -203,9 +204,10 @@ extern int __get_user_64t_4(void *);
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
+               __tmp_e = __e;                                          \
                uaccess_restore(__ua_flags);                            \
                x = (typeof(*(p))) __r2;                                \
-               __e;                                                    \
+               __tmp_e;                                                \
        })
 
 #define get_user(x, p)                                                 \
index 29070eb8df7d9d89c9e1b96f6d96b70514430665..3fc7f9750ce4b83043daed7bbc40cc52fb339f4c 100644 (file)
@@ -253,7 +253,7 @@ __create_page_tables:
        add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
-#ifdef CONFIG_CPU_ENDIAN_BE8
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
        str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
 #else
        str     r8, [r5]                        @ Save physical start of kernel (LE)
@@ -266,7 +266,7 @@ __create_page_tables:
        bls     1b
        eor     r3, r3, r7                      @ Remove the MMU flags
        adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
-#ifdef CONFIG_CPU_ENDIAN_BE8
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
        str     r3, [r5, #4]                    @ Save physical end of kernel (BE)
 #else
        str     r3, [r5]                        @ Save physical end of kernel (LE)
index 4a7edc6e848f3a02ed1f9cee8f5fa7d45bcb85ad..195dff58bafc73277667a2f3f45badd3490022cc 100644 (file)
@@ -136,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
-                               if (get_kernel_nofault(val, (unsigned long *)p))
+                               if (!get_kernel_nofault(val, (unsigned long *)p))
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
index 50136828f5b54eb88fd7c7628837f4d64bed7636..f14c2360ea0b1e4efcc67a6798e9daf7283a38c0 100644 (file)
@@ -40,6 +40,10 @@ SECTIONS
                ARM_DISCARD
                *(.alt.smp.init)
                *(.pv_table)
+#ifndef CONFIG_ARM_UNWIND
+               *(.ARM.exidx) *(.ARM.exidx.*)
+               *(.ARM.extab) *(.ARM.extab.*)
+#endif
        }
 
        . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
@@ -172,7 +176,7 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
 ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
 #endif
 
-#ifdef CONFIG_ARM_MPU
+#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
 /*
  * Due to PMSAv7 restriction on base address and size we have to
  * enforce minimal alignment restrictions. It was seen that weaker
index d6cfe7c4bb00f7f6e6b1132b8f95370e86f30096..8711d6824c1fa558ff6bfef963e9f6ff464ef7fd 100644 (file)
@@ -47,12 +47,26 @@ struct at91_pm_bu {
        unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
 };
 
+/*
+ * struct at91_pm_sfrbu_regs: registers mapping for SFRBU
+ * @pswbu: power switch BU control registers
+ */
+struct at91_pm_sfrbu_regs {
+       struct {
+               u32 key;
+               u32 ctrl;
+               u32 state;
+               u32 softsw;
+       } pswbu;
+};
+
 /**
  * struct at91_soc_pm - AT91 SoC power management data structure
  * @config_shdwc_ws: wakeup sources configuration function for SHDWC
  * @config_pmc_ws: wakeup sources configuration function for PMC
  * @ws_ids: wakeup sources of_device_id array
  * @data: PM data to be used on last phase of suspend
+ * @sfrbu_regs: SFRBU registers mapping
  * @bu: backup unit mapped data (for backup mode)
  * @memcs: memory chip select
  */
@@ -62,6 +76,7 @@ struct at91_soc_pm {
        const struct of_device_id *ws_ids;
        struct at91_pm_bu *bu;
        struct at91_pm_data data;
+       struct at91_pm_sfrbu_regs sfrbu_regs;
        void *memcs;
 };
 
@@ -356,9 +371,36 @@ static int at91_suspend_finish(unsigned long val)
        return 0;
 }
 
+static void at91_pm_switch_ba_to_vbat(void)
+{
+       unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
+       unsigned int val;
+
+       /* Just for safety. */
+       if (!soc_pm.data.sfrbu)
+               return;
+
+       val = readl(soc_pm.data.sfrbu + offset);
+
+       /* Already on VBAT. */
+       if (!(val & soc_pm.sfrbu_regs.pswbu.state))
+               return;
+
+       val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
+       val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
+       writel(val, soc_pm.data.sfrbu + offset);
+
+       /* Wait for update. */
+       val = readl(soc_pm.data.sfrbu + offset);
+       while (val & soc_pm.sfrbu_regs.pswbu.state)
+               val = readl(soc_pm.data.sfrbu + offset);
+}
+
 static void at91_pm_suspend(suspend_state_t state)
 {
        if (soc_pm.data.mode == AT91_PM_BACKUP) {
+               at91_pm_switch_ba_to_vbat();
+
                cpu_suspend(0, at91_suspend_finish);
 
                /* The SRAM is lost between suspend cycles */
@@ -589,18 +631,22 @@ static const struct of_device_id ramc_phy_ids[] __initconst = {
        { /* Sentinel. */ },
 };
 
-static __init void at91_dt_ramc(bool phy_mandatory)
+static __init int at91_dt_ramc(bool phy_mandatory)
 {
        struct device_node *np;
        const struct of_device_id *of_id;
        int idx = 0;
        void *standby = NULL;
        const struct ramc_info *ramc;
+       int ret;
 
        for_each_matching_node_and_match(np, ramc_ids, &of_id) {
                soc_pm.data.ramc[idx] = of_iomap(np, 0);
-               if (!soc_pm.data.ramc[idx])
-                       panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
+               if (!soc_pm.data.ramc[idx]) {
+                       pr_err("unable to map ramc[%d] cpu registers\n", idx);
+                       ret = -ENOMEM;
+                       goto unmap_ramc;
+               }
 
                ramc = of_id->data;
                if (ramc) {
@@ -612,25 +658,42 @@ static __init void at91_dt_ramc(bool phy_mandatory)
                idx++;
        }
 
-       if (!idx)
-               panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));
+       if (!idx) {
+               pr_err("unable to find compatible ram controller node in dtb\n");
+               ret = -ENODEV;
+               goto unmap_ramc;
+       }
 
        /* Lookup for DDR PHY node, if any. */
        for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
                soc_pm.data.ramc_phy = of_iomap(np, 0);
-               if (!soc_pm.data.ramc_phy)
-                       panic(pr_fmt("unable to map ramc phy cpu registers\n"));
+               if (!soc_pm.data.ramc_phy) {
+                       pr_err("unable to map ramc phy cpu registers\n");
+                       ret = -ENOMEM;
+                       goto unmap_ramc;
+               }
        }
 
-       if (phy_mandatory && !soc_pm.data.ramc_phy)
-               panic(pr_fmt("DDR PHY is mandatory!\n"));
+       if (phy_mandatory && !soc_pm.data.ramc_phy) {
+               pr_err("DDR PHY is mandatory!\n");
+               ret = -ENODEV;
+               goto unmap_ramc;
+       }
 
        if (!standby) {
                pr_warn("ramc no standby function available\n");
-               return;
+               return 0;
        }
 
        at91_cpuidle_device.dev.platform_data = standby;
+
+       return 0;
+
+unmap_ramc:
+       while (idx)
+               iounmap(soc_pm.data.ramc[--idx]);
+
+       return ret;
 }
 
 static void at91rm9200_idle(void)
@@ -1017,6 +1080,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
 
 void __init at91rm9200_pm_init(void)
 {
+       int ret;
+
        if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
                return;
 
@@ -1028,7 +1093,9 @@ void __init at91rm9200_pm_init(void)
        soc_pm.data.standby_mode = AT91_PM_STANDBY;
        soc_pm.data.suspend_mode = AT91_PM_ULP0;
 
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
 
        /*
         * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
@@ -1046,13 +1113,17 @@ void __init sam9x60_pm_init(void)
        static const int iomaps[] __initconst = {
                [AT91_PM_ULP1]          = AT91_PM_IOMAP(SHDWC),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sam9x60_ws_ids;
@@ -1061,6 +1132,8 @@ void __init sam9x60_pm_init(void)
 
 void __init at91sam9_pm_init(void)
 {
+       int ret;
+
        if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
                return;
 
@@ -1072,7 +1145,10 @@ void __init at91sam9_pm_init(void)
        soc_pm.data.standby_mode = AT91_PM_STANDBY;
        soc_pm.data.suspend_mode = AT91_PM_ULP0;
 
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(at91sam9_idle);
 }
 
@@ -1081,12 +1157,16 @@ void __init sama5_pm_init(void)
        static const int modes[] __initconst = {
                AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA5))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 }
 
@@ -1101,18 +1181,27 @@ void __init sama5d2_pm_init(void)
                [AT91_PM_BACKUP]        = AT91_PM_IOMAP(SHDWC) |
                                          AT91_PM_IOMAP(SFRBU),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sama5d2_ws_ids;
        soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
        soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
+
+       soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+       soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+       soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+       soc_pm.sfrbu_regs.pswbu.state = BIT(3);
 }
 
 void __init sama7_pm_init(void)
@@ -1127,18 +1216,27 @@ void __init sama7_pm_init(void)
                [AT91_PM_BACKUP]        = AT91_PM_IOMAP(SFRBU) |
                                          AT91_PM_IOMAP(SHDWC),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA7))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
 
-       at91_dt_ramc(true);
+       ret = at91_dt_ramc(true);
+       if (ret)
+               return;
+
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sama7g5_ws_ids;
        soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+
+       soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+       soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+       soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+       soc_pm.sfrbu_regs.pswbu.state = BIT(2);
 }
 
 static int __init at91_pm_modes_select(char *str)
index cbd61a3bcab1d0bae012f6b0a86c3b3aecfd7133..fdb4f63ecde4bc6a2d523c7ee3d41ff7a60b287c 100644 (file)
@@ -1014,31 +1014,55 @@ ENTRY(at91_pm_suspend_in_sram)
        mov     tmp1, #0
        mcr     p15, 0, tmp1, c7, c10, 4
 
+       /* Flush tlb. */
+       mov     r4, #0
+       mcr     p15, 0, r4, c8, c7, 0
+
+       ldr     tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
+       str     tmp1, .mckr_offset
+       ldr     tmp1, [r0, #PM_DATA_PMC_VERSION]
+       str     tmp1, .pmc_version
+       ldr     tmp1, [r0, #PM_DATA_MEMCTRL]
+       str     tmp1, .memtype
+       ldr     tmp1, [r0, #PM_DATA_MODE]
+       str     tmp1, .pm_mode
+
+       /*
+        * ldrne below are here to preload their address in the TLB as access
+        * to RAM may be limited while in self-refresh.
+        */
        ldr     tmp1, [r0, #PM_DATA_PMC]
        str     tmp1, .pmc_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_RAMC0]
        str     tmp1, .sramc_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_RAMC1]
        str     tmp1, .sramc1_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
+#ifndef CONFIG_SOC_SAM_V4_V5
+       /* ldrne below are here to preload their address in the TLB */
        ldr     tmp1, [r0, #PM_DATA_RAMC_PHY]
        str     tmp1, .sramc_phy_base
-       ldr     tmp1, [r0, #PM_DATA_MEMCTRL]
-       str     tmp1, .memtype
-       ldr     tmp1, [r0, #PM_DATA_MODE]
-       str     tmp1, .pm_mode
-       ldr     tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
-       str     tmp1, .mckr_offset
-       ldr     tmp1, [r0, #PM_DATA_PMC_VERSION]
-       str     tmp1, .pmc_version
-       /* Both ldrne below are here to preload their address in the TLB */
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_SHDWC]
        str     tmp1, .shdwc
        cmp     tmp1, #0
        ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_SFRBU]
        str     tmp1, .sfrbu
        cmp     tmp1, #0
        ldrne   tmp2, [tmp1, #0x10]
+#endif
 
        /* Active the self-refresh mode */
        at91_sramc_self_refresh_ena
index 7a4bd8838036ffb7a7233325b6b6696170d173d4..ddf873f35e2b041ee2874a9b267bbe7437a86704 100644 (file)
@@ -11,7 +11,7 @@
 
 #define LSR_THRE       0x20
 
-static void putc(const char c)
+static inline void putc(const char c)
 {
        int i;
 
@@ -24,7 +24,7 @@ static void putc(const char c)
        *UART_THR = c;
 }
 
-static void flush(void)
+static inline void flush(void)
 {
 }
 
index 11dcc369ec14a3353d94504b10fa4c476124225c..c9d7c29d95e1e1eddf08196f7e42c34b7de496f4 100644 (file)
@@ -172,6 +172,9 @@ static void __init imx6q_init_machine(void)
                                imx_get_soc_revision());
 
        imx6q_enet_phy_init();
+
+       of_platform_default_populate(NULL, NULL, NULL);
+
        imx_anatop_init();
        cpu_is_imx6q() ?  imx6q_pm_init() : imx6dl_pm_init();
        imx6q_1588_init();
index 9244437cb1b9b5345e248ee542af658b4bea8a72..f2ecca339910a0e7b3cf339dd250b4be2b10b434 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/genalloc.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of.h>
@@ -619,6 +620,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
 
 static void imx6_pm_stby_poweroff(void)
 {
+       gic_cpu_if_down(0);
        imx6_set_lpm(STOP_POWER_OFF);
        imx6q_suspend_finish(0);
 
index 95fd1fbb082607128aca4cf146d2e82a430150c2..59a8e8cc44693b0f4720f4945f1e1fd1d5183a7b 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/iopoll.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 #include <linux/reset-controller.h>
 #include <linux/smp.h>
 #include <asm/smp_plat.h>
@@ -81,11 +82,6 @@ static const struct reset_control_ops imx_src_ops = {
        .reset = imx_src_reset_module,
 };
 
-static struct reset_controller_dev imx_reset_controller = {
-       .ops = &imx_src_ops,
-       .nr_resets = ARRAY_SIZE(sw_reset_bits),
-};
-
 static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
 {
        writel_relaxed(enable, gpc_base + offset);
@@ -177,10 +173,6 @@ void __init imx_src_init(void)
        src_base = of_iomap(np, 0);
        WARN_ON(!src_base);
 
-       imx_reset_controller.of_node = np;
-       if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
-               reset_controller_register(&imx_reset_controller);
-
        /*
         * force warm reset sources to generate cold reset
         * for a more reliable restart
@@ -214,3 +206,33 @@ void __init imx7_src_init(void)
        if (!gpc_base)
                return;
 }
+
+static const struct of_device_id imx_src_dt_ids[] = {
+       { .compatible = "fsl,imx51-src" },
+       { /* sentinel */ }
+};
+
+static int imx_src_probe(struct platform_device *pdev)
+{
+       struct reset_controller_dev *rcdev;
+
+       rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
+       if (!rcdev)
+               return -ENOMEM;
+
+       rcdev->ops = &imx_src_ops;
+       rcdev->dev = &pdev->dev;
+       rcdev->of_node = pdev->dev.of_node;
+       rcdev->nr_resets = ARRAY_SIZE(sw_reset_bits);
+
+       return devm_reset_controller_register(&pdev->dev, rcdev);
+}
+
+static struct platform_driver imx_src_driver = {
+       .driver = {
+               .name = "imx-src",
+               .of_match_table = imx_src_dt_ids,
+       },
+       .probe = imx_src_probe,
+};
+builtin_platform_driver(imx_src_driver);
index 36bc0000cb6ab8705e3dd1f865109141fdf8c462..ba3a350479c8d68123d62d0dcd35171f63e7835e 100644 (file)
@@ -9,16 +9,4 @@
 /* REVISIT: omap1 legacy drivers still rely on this */
 #include <mach/soc.h>
 
-/*
- * Bus address is physical address, except for OMAP-1510 Local Bus.
- * OMAP-1510 bus address is translated into a Local Bus address if the
- * OMAP bus type is lbus. We do the address translation based on the
- * device overriding the defaults used in the dma-mapping API.
- */
-
-/*
- * OMAP-1510 Local Bus address offset
- */
-#define OMAP1510_LB_OFFSET     UL(0x30000000)
-
 #endif
index 86d3b3c157af444901d69033a7e24a82bc0addd7..e60831c82b789fac6860517d1734d740e019ef4c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-map-ops.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include <asm/irq.h>
 
@@ -206,8 +207,6 @@ static inline void udc_device_init(struct omap_usb_config *pdata)
 
 #endif
 
-#if    IS_ENABLED(CONFIG_USB_OHCI_HCD)
-
 /* The dmamask must be set for OHCI to work */
 static u64 ohci_dmamask = ~(u32)0;
 
@@ -236,20 +235,15 @@ static struct platform_device ohci_device = {
 
 static inline void ohci_device_init(struct omap_usb_config *pdata)
 {
+       if (!IS_ENABLED(CONFIG_USB_OHCI_HCD))
+               return;
+
        if (cpu_is_omap7xx())
                ohci_resources[1].start = INT_7XX_USB_HHC_1;
        pdata->ohci_device = &ohci_device;
        pdata->ocpi_enable = &ocpi_enable;
 }
 
-#else
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
 #if    defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
 
 static struct resource otg_resources[] = {
@@ -534,33 +528,87 @@ bad:
 }
 
 #ifdef CONFIG_ARCH_OMAP15XX
+/* OMAP-1510 OHCI has its own MMU for DMA */
+#define OMAP1510_LB_MEMSIZE    32      /* Should be same as SDRAM size */
+#define OMAP1510_LB_CLOCK_DIV  0xfffec10c
+#define OMAP1510_LB_MMU_CTL    0xfffec208
+#define OMAP1510_LB_MMU_LCK    0xfffec224
+#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
+#define OMAP1510_LB_MMU_CAM_H  0xfffec22c
+#define OMAP1510_LB_MMU_CAM_L  0xfffec230
+#define OMAP1510_LB_MMU_RAM_H  0xfffec234
+#define OMAP1510_LB_MMU_RAM_L  0xfffec238
 
-/* ULPD_DPLL_CTRL */
-#define DPLL_IOB               (1 << 13)
-#define DPLL_PLL_ENABLE                (1 << 4)
-#define DPLL_LOCK              (1 << 0)
+/*
+ * Bus address is physical address, except for OMAP-1510 Local Bus.
+ * OMAP-1510 bus address is translated into a Local Bus address if the
+ * OMAP bus type is lbus.
+ */
+#define OMAP1510_LB_OFFSET        UL(0x30000000)
 
-/* ULPD_APLL_CTRL */
-#define APLL_NDPLL_SWITCH      (1 << 0)
+/*
+ * OMAP-1510 specific Local Bus clock on/off
+ */
+static int omap_1510_local_bus_power(int on)
+{
+       if (on) {
+               omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
+               udelay(200);
+       } else {
+               omap_writel(0, OMAP1510_LB_MMU_CTL);
+       }
 
-static int omap_1510_usb_ohci_notifier(struct notifier_block *nb,
-               unsigned long event, void *data)
+       return 0;
+}
+
+/*
+ * OMAP-1510 specific Local Bus initialization
+ * NOTE: This assumes 32MB memory size in OMAP1510_LB_MEMSIZE.
+ *       See also arch/mach-omap/memory.h for __virt_to_dma() and
+ *       __dma_to_virt() which need to match with the physical
+ *       Local Bus address below.
+ */
+static int omap_1510_local_bus_init(void)
 {
-       struct device *dev = data;
+       unsigned int tlb;
+       unsigned long lbaddr, physaddr;
+
+       omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
+              OMAP1510_LB_CLOCK_DIV);
+
+       /* Configure the Local Bus MMU table */
+       for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
+               lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
+               physaddr = tlb * 0x00100000 + PHYS_OFFSET;
+               omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
+               omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
+                      OMAP1510_LB_MMU_CAM_L);
+               omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
+               omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
+               omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
+               omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
+       }
 
-       if (event != BUS_NOTIFY_ADD_DEVICE)
-               return NOTIFY_DONE;
+       /* Enable the walking table */
+       omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
+       udelay(200);
 
-       if (strncmp(dev_name(dev), "ohci", 4) == 0 &&
-           dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET,
-                       (u64)-1))
-               WARN_ONCE(1, "failed to set DMA offset\n");
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block omap_1510_usb_ohci_nb = {
-       .notifier_call          = omap_1510_usb_ohci_notifier,
-};
+static void omap_1510_local_bus_reset(void)
+{
+       omap_1510_local_bus_power(1);
+       omap_1510_local_bus_init();
+}
+
+/* ULPD_DPLL_CTRL */
+#define DPLL_IOB               (1 << 13)
+#define DPLL_PLL_ENABLE                (1 << 4)
+#define DPLL_LOCK              (1 << 0)
+
+/* ULPD_APLL_CTRL */
+#define APLL_NDPLL_SWITCH      (1 << 0)
 
 static void __init omap_1510_usb_init(struct omap_usb_config *config)
 {
@@ -616,19 +664,19 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
        }
 #endif
 
-#if    IS_ENABLED(CONFIG_USB_OHCI_HCD)
-       if (config->register_host) {
+       if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) {
                int status;
 
-               bus_register_notifier(&platform_bus_type,
-                                     &omap_1510_usb_ohci_nb);
                ohci_device.dev.platform_data = config;
+               dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET,
+                                     OMAP1510_LB_OFFSET, (u64)-1);
                status = platform_device_register(&ohci_device);
                if (status)
                        pr_debug("can't register OHCI device, %d\n", status);
                /* hcd explicitly gates 48MHz */
+
+               config->lb_reset = omap_1510_local_bus_reset;
        }
-#endif
 }
 
 #else
index 7f13adf26e61198c52a5b4289dad9664b7e03463..02c253de9b6e78ff341ec870781dd3fd6b11fbca 100644 (file)
@@ -112,7 +112,6 @@ config ARCH_OMAP2PLUS
        select PM_GENERIC_DOMAINS
        select PM_GENERIC_DOMAINS_OF
        select RESET_CONTROLLER
-       select SIMPLE_PM_BUS
        select SOC_BUS
        select TI_SYSC
        select OMAP_IRQCHIP
index 12b26e04686faf178c27ad45897e1052411c0a05..0c2936c7a37998b769807063e3a5f0c1f0b26b1a 100644 (file)
@@ -3614,6 +3614,8 @@ int omap_hwmod_init_module(struct device *dev,
                oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
        if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
                oh->flags |= HWMOD_SWSUP_MSTANDBY;
+       if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
+               oh->flags |= HWMOD_CLKDM_NOAUTO;
 
        error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
                                        rev_offs, sysc_offs, syss_offs,
index e2c743aa2eb2b88042064657e2307ab0134d31d8..d9f7dfe2a7ed3efac90d0c0fe5852b5503e7eb49 100644 (file)
@@ -340,6 +340,7 @@ ENTRY(\name\()_cache_fns)
 
 .macro define_tlb_functions name:req, flags_up:req, flags_smp
        .type   \name\()_tlb_fns, #object
+       .align 2
 ENTRY(\name\()_tlb_fns)
        .long   \name\()_flush_user_tlb_range
        .long   \name\()_flush_kern_tlb_range
index a951276f05475ab8520a666257435b943b09d575..a903b26cde40970f89ca530bb621456ba202c8ed 100644 (file)
  *                        +-----+
  *                        |RSVD | JIT scratchpad
  * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ *                        | ... | caller-saved registers
+ *                        +-----+
+ *                        | ... | arguments passed on stack
+ * ARM_SP during call =>  +-----+
  *                        |     |
  *                        | ... | Function call stack
  *                        |     |
  *
  * When popping registers off the stack at the end of a BPF function, we
  * reference them via the current ARM_FP register.
+ *
+ * Some eBPF operations are implemented via a call to a helper function.
+ * Such calls are "invisible" in the eBPF code, so it is up to the calling
+ * program to preserve any caller-saved ARM registers during the call. The
+ * JIT emits code to push and pop those registers onto the stack, immediately
+ * above the callee stack frame.
  */
 #define CALLEE_MASK    (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
                         1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
@@ -70,6 +80,8 @@
 #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
 #define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
 
+#define CALLER_MASK    (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
+
 enum {
        /* Stack layout - these are offsets from (top of stack - 4) */
        BPF_R2_HI,
@@ -464,6 +476,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 
 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 {
+       const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
        const s8 *tmp = bpf2a32[TMP_REG_1];
 
 #if __LINUX_ARM_ARCH__ == 7
@@ -495,11 +508,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
                emit(ARM_MOV_R(ARM_R0, rm), ctx);
        }
 
+       /* Push caller-saved registers on stack */
+       emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
+
        /* Call appropriate function */
        emit_mov_i(ARM_IP, op == BPF_DIV ?
                   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
        emit_blx_r(ARM_IP, ctx);
 
+       /* Restore caller-saved registers from stack */
+       emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
+
        /* Save return value */
        if (rd != ARM_R0)
                emit(ARM_MOV_R(rd, ARM_R0), ctx);
index 27e0af78e88b022b18f26f1d64e6110610a4a883..9d8634e2f12f769af0fce2c64d3321411f4d1bcf 100644 (file)
@@ -439,7 +439,7 @@ static struct undef_hook kprobes_arm_break_hook = {
 
 #endif /* !CONFIG_THUMB2_KERNEL */
 
-int __init arch_init_kprobes()
+int __init arch_init_kprobes(void)
 {
        arm_probes_decode_init();
 #ifdef CONFIG_THUMB2_KERNEL
index 5c7ae4c3954be956d9ca39b2f7795a52b6cfbb7f..fee914c716aa262d12060cce3980471d510af283 100644 (file)
@@ -1931,8 +1931,6 @@ source "drivers/cpufreq/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
index 02f8e72f0cad1db1e5490ee559cd682b5e8250a5..05486cccee1c4ef73d7e8284d30703280c363c79 100644 (file)
@@ -75,7 +75,7 @@
        pinctrl-0 = <&emac_rgmii_pins>;
        phy-supply = <&reg_gmac_3v3>;
        phy-handle = <&ext_rgmii_phy>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index 05ae893d1b2ee7aaf048b353fd829eb2ae5b96ad..fbf13f7c2baf5db494c41a7769cfe0ede723b446 100644 (file)
 
        bus@8000000 {
                compatible = "arm,vexpress,v2m-p1", "simple-bus";
-               arm,v2m-memory-map = "rs1";
                #address-cells = <2>; /* SMB chipselect number and offset */
                #size-cells = <1>;
 
index b8a21092db4d3f09e03e065b36343d84b329286b..269b649934b5abf4f4675c8c48f08c530aadb944 100644 (file)
                                remote-endpoint = <&clcd_pads>;
                        };
                };
-
-              panel-timing {
-                      clock-frequency = <63500127>;
-                      hactive = <1024>;
-                      hback-porch = <152>;
-                      hfront-porch = <48>;
-                      hsync-len = <104>;
-                      vactive = <768>;
-                      vback-porch = <23>;
-                      vfront-porch = <3>;
-                      vsync-len = <4>;
-              };
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 63>;
                interrupt-map = <0 0  0 &gic 0 0 GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
index 8e7a66943b0180d5077a190f872af96843c617da..6288e104a0893f65377ee70706c95137fe4e63ba 100644 (file)
@@ -27,8 +27,6 @@
                reg = <0x0 0x2b1f0000 0x0 0x1000>;
                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "mhu_lpri_rx",
-                                 "mhu_hpri_rx";
                #mbox-cells = <1>;
                clocks = <&soc_refclk100mhz>;
                clock-names = "apb_pclk";
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 15>;
                interrupt-map = <0 0  0 &gic 0 GIC_SPI  68 IRQ_TYPE_LEVEL_HIGH>,
index 40d95c58b55e255454d7d8fa0bbc418c84ef7e01..fefd2b5f01762820531cafb6c7bd51c3749ab994 100644 (file)
        };
 
        bus@8000000 {
-               motherboard-bus {
+               compatible = "simple-bus";
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+               motherboard-bus@8000000 {
                        compatible = "arm,vexpress,v2p-p1", "simple-bus";
                        #address-cells = <2>;  /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
-                       model = "V2M-Juno";
+                       ranges = <0 0 0 0x08000000 0x04000000>,
+                                <1 0 0 0x14000000 0x04000000>,
+                                <2 0 0 0x18000000 0x04000000>,
+                                <3 0 0 0x1c000000 0x04000000>,
+                                <4 0 0 0x0c000000 0x04000000>,
+                                <5 0 0 0x10000000 0x04000000>;
                        arm,hbi = <0x252>;
                        arm,vexpress,site = <0>;
-                       arm,v2m-memory-map = "rs1";
 
                        flash@0 {
                                /* 2 * 32MiB NOR Flash memory mounted on CS0 */
                                        };
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <5>;
                                        clock-names = "KMIREFCLK", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x10000>;
                                        interrupts = <7>;
index 3050f45bade4a2540d22a4e09da7fed2055ddf32..258991ad7cc0af2ecc7a7f5ec793f98074a1164e 100644 (file)
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 63>;
                interrupt-map = <0 0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
index b917d9d3f1c4c16550d7cde7e1dc47f18aefb270..33182d9e582677fc0bbdf3d698d46ef102b1731e 100644 (file)
@@ -6,7 +6,7 @@
  */
 / {
        bus@8000000 {
-               motherboard-bus {
+               motherboard-bus@8000000 {
                        arm,v2m-memory-map = "rs2";
 
                        iofpga-bus@300000000 {
index 4c4a381d2c75fd4b805e663f6683fd98872a4d91..5f6cab668aa079853c554e284077db1adb37387f 100644 (file)
        };
 
        bus@8000000 {
-               motherboard-bus {
-                       arm,v2m-memory-map = "rs1";
+               compatible = "simple-bus";
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+               motherboard-bus@8000000 {
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0 0x08000000 0x04000000>,
+                                <1 0 0 0x14000000 0x04000000>,
+                                <2 0 0 0x18000000 0x04000000>,
+                                <3 0 0 0x1c000000 0x04000000>,
+                                <4 0 0 0x0c000000 0x04000000>,
+                                <5 0 0 0x10000000 0x04000000>;
 
                        flash@0 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
                                        clock-names = "apb_pclk";
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <9>, <10>;
                                        clock-names = "uartclk", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x1000>;
                                        interrupts = <0>;
index d859914500a706bbecc4d237b172c042fe8fc560..5b6d9d8e934db472ea29a67859174535beb6ecfe 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 };
index 343ecf0e8973a2faf4b61f820880c8f8b75694d6..06b36cc65865c831bc192581518fa64a7e7e5132 100644 (file)
                        interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <0>; /* fixed up by bootloader */
                        clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
-                       voltage-ranges = <1800 1800 3300 3300>;
+                       voltage-ranges = <1800 1800>;
                        sdhci,auto-cmd12;
-                       broken-cd;
+                       non-removable;
                        little-endian;
                        bus-width = <4>;
                        status = "disabled";
index 988f8ab679ad6c3ec0942bb38d39971b2edaa477..40f5e7a3b0644ed7c348008eacd3f5819f6c8c08 100644 (file)
@@ -91,7 +91,7 @@
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index 4e2820d19244aa2769000630b2a83d37c076fc7c..a2b24d4d4e3e74e279f1c68b99cc2366311ef577 100644 (file)
@@ -48,7 +48,7 @@
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index d17abb51583511abe905359bef28318516e4c644..e99e7644ff392b1a92ad95f5749f6d4965802a3c 100644 (file)
@@ -70,7 +70,9 @@
                regulator-name = "rst-usb-eth2";
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_usb_eth2>;
-               gpio = <&gpio3 2 GPIO_ACTIVE_LOW>;
+               gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>;
+               enable-active-high;
+               regulator-always-on;
        };
 
        reg_vdd_5v: regulator-5v {
@@ -95,7 +97,7 @@
                clocks = <&osc_can>;
                interrupt-parent = <&gpio4>;
                interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
-               spi-max-frequency = <100000>;
+               spi-max-frequency = <10000000>;
                vdd-supply = <&reg_vdd_3v3>;
                xceiver-supply = <&reg_vdd_5v>;
        };
 &fec1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet>;
-       phy-connection-type = "rgmii";
+       phy-connection-type = "rgmii-rxid";
        phy-handle = <&ethphy>;
        status = "okay";
 
index d0456daefda8830bbc46bbfacec716651090101c..42bbbb3f532bc96fa2cb161f033f956243e58c95 100644 (file)
                        reg_vdd_soc: BUCK1 {
                                regulator-name = "buck1";
                                regulator-min-microvolt = <800000>;
-                               regulator-max-microvolt = <900000>;
+                               regulator-max-microvolt = <850000>;
                                regulator-boot-on;
                                regulator-always-on;
                                regulator-ramp-delay = <3125>;
+                               nxp,dvs-run-voltage = <850000>;
+                               nxp,dvs-standby-voltage = <800000>;
                        };
 
                        reg_vdd_arm: BUCK2 {
                                regulator-min-microvolt = <850000>;
                                regulator-max-microvolt = <950000>;
                                regulator-boot-on;
+                               regulator-always-on;
                                regulator-ramp-delay = <3125>;
                                nxp,dvs-run-voltage = <950000>;
                                nxp,dvs-standby-voltage = <850000>;
                        reg_vdd_dram: BUCK3 {
                                regulator-name = "buck3";
                                regulator-min-microvolt = <850000>;
-                               regulator-max-microvolt = <900000>;
+                               regulator-max-microvolt = <950000>;
                                regulator-boot-on;
                                regulator-always-on;
                        };
 
                        reg_vdd_snvs: LDO2 {
                                regulator-name = "ldo2";
-                               regulator-min-microvolt = <850000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index 05cb60991fb9963e006e0d2ecba4edf7a7ed7892..d52686f4c0598850bc6dc9ec8d5f69361e567558 100644 (file)
        pinctrl_hog: hoggrp {
                fsl,pins = <
                        MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1       0x40000159 /* M2_GDIS# */
-                       MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12      0x40000041 /* M2_RST# */
+                       MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13      0x40000041 /* M2_RST# */
                        MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7      0x40000119 /* M2_OFF# */
                        MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15      0x40000159 /* M2_WDIS# */
                        MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14       0x40000041 /* AMP GPIO1 */
index 54eaf3d6055b11707c1fad1b1bd68729fa5f5bfe..3b2d627a03428937edc42f67e49684ab09d66ddb 100644 (file)
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index e77db4996e58e823a58acd28dae4c1ba9eba9111..236f425e1570a2470a5b2f722fc8321ca13ece00 100644 (file)
        pinctrl_hog: hoggrp {
                fsl,pins = <
                        MX8MN_IOMUXC_NAND_CE0_B_GPIO3_IO1       0x40000159 /* M2_GDIS# */
-                       MX8MN_IOMUXC_GPIO1_IO12_GPIO1_IO12      0x40000041 /* M2_RST# */
+                       MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13      0x40000041 /* M2_RST# */
                        MX8MN_IOMUXC_NAND_DATA01_GPIO3_IO7      0x40000119 /* M2_OFF# */
                        MX8MN_IOMUXC_GPIO1_IO15_GPIO1_IO15      0x40000159 /* M2_WDIS# */
                        MX8MN_IOMUXC_SAI2_RXFS_GPIO4_IO21       0x40000041 /* APP GPIO1 */
index aa78e0d8c72b26d68206e284923fb79ce0f36d7b..fc178eebf8aa443c0cc355e532f766439cc72850 100644 (file)
@@ -74,7 +74,7 @@
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index 49f9db971f3b22e0ab549594e75b734c0469a781..b83df77195ece79761ad3106fa06818aa400683b 100644 (file)
                #size-cells = <1>;
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
+               spi-tx-bus-width = <1>;
+               spi-rx-bus-width = <4>;
        };
 };
 
index f593e4ff62e1c73fb092fdee4c5f59e5afb4e8a5..564746d5000d5aed30f6c29b5977083a254eb88c 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                reg = <0>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
                m25p,fast-read;
                spi-max-frequency = <50000000>;
index c566a64b1373fc55b7d0e338d8342112dd9aabdc..0df76f7b1cc11fedc685eafb0a3201401c342bc6 100644 (file)
                #size-cells = <0>;
 
                pon: power-on@800 {
-                       compatible = "qcom,pm8916-pon";
+                       compatible = "qcom,pm8998-pon";
                        reg = <0x0800>;
+                       mode-bootloader = <0x2>;
+                       mode-recovery = <0x1>;
 
                        pon_pwrkey: pwrkey {
                                compatible = "qcom,pm8941-pwrkey";
index 8ac96f8e79d42bababaf79a9f35b5d88a629763b..28d5b5528516b029fe6df2587471b44121ebdac6 100644 (file)
        };
 };
 
+&pon_pwrkey {
+       status = "okay";
+};
+
+&pon_resin {
+       status = "okay";
+
+       linux,code = <KEY_VOLUMEDOWN>;
+};
+
 &qupv3_id_0 {
        status = "okay";
 };
index 0f2b3c00e4346725397416b247a198c91d00aa7b..70c88c37de321028c55c6af40e2e62138e49cbc3 100644 (file)
                        "Headphone Jack", "HPOL",
                        "Headphone Jack", "HPOR";
 
-               #sound-dai-cells = <0>;
                #address-cells = <1>;
                #size-cells = <0>;
 
                        };
                };
 
-               dai-link@2 {
+               dai-link@5 {
                        link-name = "MultiMedia2";
-                       reg = <2>;
+                       reg = <LPASS_DP_RX>;
                        cpu {
-                               sound-dai = <&lpass_cpu 2>;
+                               sound-dai = <&lpass_cpu LPASS_DP_RX>;
                        };
 
                        codec {
@@ -782,7 +781,7 @@ hp_i2c: &i2c9 {
                qcom,playback-sd-lines = <0>;
        };
 
-       hdmi-primary@0 {
+       hdmi@5 {
                reg = <LPASS_DP_RX>;
        };
 };
index 53a21d0861787b50e0812c93d90fb6b565dcd9ca..fd78f16181ddd47b9452c1a1c78a8e9d926d551c 100644 (file)
 
                cpufreq_hw: cpufreq@18591000 {
                        compatible = "qcom,cpufreq-epss";
-                       reg = <0 0x18591100 0 0x900>,
-                             <0 0x18592100 0 0x900>,
-                             <0 0x18593100 0 0x900>;
+                       reg = <0 0x18591000 0 0x1000>,
+                             <0 0x18592000 0 0x1000>,
+                             <0 0x18593000 0 0x1000>;
                        clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
                        clock-names = "xo", "alternate";
                        #freq-domain-cells = <1>;
index 9153e6616ba4bda13d3ea75aa38bf20fa428b01a..9c7f87e42fccd0dfd40cd3bce643b91c45b9f79d 100644 (file)
                        compatible = "qcom,sdm660-a2noc";
                        reg = <0x01704000 0xc100>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
+                       clock-names = "bus",
+                                     "bus_a",
+                                     "ipa",
+                                     "ufs_axi",
+                                     "aggre2_ufs_axi",
+                                     "aggre2_usb3_axi",
+                                     "cfg_noc_usb2_axi";
                        clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+                                <&rpmcc RPM_SMD_IPA_CLK>,
+                                <&gcc GCC_UFS_AXI_CLK>,
+                                <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+                                <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+                                <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
                };
 
                mnoc: interconnect@1745000 {
index 6d7172e6f4c30558b06424b6be6ceb0c7c30fc45..b3b91192618449ed3d6b84d328c8bb9f42cc0782 100644 (file)
                        no-map;
                };
 
-               wlan_msa_mem: memory@8c400000 {
-                       reg = <0 0x8c400000 0 0x100000>;
+               ipa_fw_mem: memory@8c400000 {
+                       reg = <0 0x8c400000 0 0x10000>;
                        no-map;
                };
 
-               gpu_mem: memory@8c515000 {
-                       reg = <0 0x8c515000 0 0x2000>;
+               ipa_gsi_mem: memory@8c410000 {
+                       reg = <0 0x8c410000 0 0x5000>;
                        no-map;
                };
 
-               ipa_fw_mem: memory@8c517000 {
-                       reg = <0 0x8c517000 0 0x5a000>;
+               gpu_mem: memory@8c415000 {
+                       reg = <0 0x8c415000 0 0x2000>;
                        no-map;
                };
 
-               adsp_mem: memory@8c600000 {
-                       reg = <0 0x8c600000 0 0x1a00000>;
+               adsp_mem: memory@8c500000 {
+                       reg = <0 0x8c500000 0 0x1a00000>;
+                       no-map;
+               };
+
+               wlan_msa_mem: memory@8df00000 {
+                       reg = <0 0x8df00000 0 0x100000>;
                        no-map;
                };
 
index 385e5029437d368190d56d6aa02df8ca78577dab..2ba23aa582a18d3436b370952d7c4075c8d2e39e 100644 (file)
 #include "sdm850.dtsi"
 #include "pm8998.dtsi"
 
+/*
+ * Update following upstream (sdm845.dtsi) reserved
+ * memory mappings for firmware loading to succeed
+ * and enable the IPA device.
+ */
+/delete-node/ &ipa_fw_mem;
+/delete-node/ &ipa_gsi_mem;
+/delete-node/ &gpu_mem;
+/delete-node/ &adsp_mem;
+/delete-node/ &wlan_msa_mem;
+
 / {
        model = "Lenovo Yoga C630";
        compatible = "lenovo,yoga-c630", "qcom,sdm845";
                };
        };
 
+       /* Reserved memory changes for IPA */
+       reserved-memory {
+               wlan_msa_mem: memory@8c400000 {
+                       reg = <0 0x8c400000 0 0x100000>;
+                       no-map;
+               };
+
+               gpu_mem: memory@8c515000 {
+                       reg = <0 0x8c515000 0 0x2000>;
+                       no-map;
+               };
+
+               ipa_fw_mem: memory@8c517000 {
+                       reg = <0 0x8c517000 0 0x5a000>;
+                       no-map;
+               };
+
+               adsp_mem: memory@8c600000 {
+                       reg = <0 0x8c600000 0 0x1a00000>;
+                       no-map;
+               };
+       };
+
        sn65dsi86_refclk: sn65dsi86-refclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
index 8c15d9fed08f089f034333f896bc1f511a05eca7..d12e4cbfc8527db04a7c978adf097883b330e6d3 100644 (file)
                        power-domains = <&dispcc MDSS_GDSC>;
 
                        clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+                                <&gcc GCC_DISP_HF_AXI_CLK>,
                                 <&gcc GCC_DISP_SF_AXI_CLK>,
                                 <&dispcc DISP_CC_MDSS_MDP_CLK>;
-                       clock-names = "iface", "nrt_bus", "core";
+                       clock-names = "iface", "bus", "nrt_bus", "core";
 
                        assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
                        assigned-clock-rates = <460000000>;
index 156d96afbbfcff1385efe40f669c528b9deacc72..545197bc050134766df4b6998b82cc7f27647756 100644 (file)
@@ -245,7 +245,6 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_HISILICON_LPC=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_FSL_MC_BUS=y
 CONFIG_TEGRA_ACONNECT=m
 CONFIG_GNSS=m
index fb0f523d14921c6c79b246e63111f4c1cbbf0767..0a048dc06a7d789ea93f669d08a8d426630771f0 100644 (file)
@@ -24,6 +24,7 @@ struct hyp_pool {
 
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
 
index 5df6193fc43044158f60db2606fb27d6902957c6..8d741f71377f4bf72b551eefcb1d847d4f761842 100644 (file)
@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
 #    runtime. Because the hypervisor is part of the kernel binary, relocations
 #    produce a kernel VA. We enumerate relocations targeting hyp at build time
 #    and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
        $(call if_changed,hyprel)
 
 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
index bacd493a4eacdefc1af400ec1a97309f02ac3510..34eeb524b686075aca7d60876b6429d25df0cbf6 100644 (file)
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-       return hyp_alloc_pages(&host_s2_pool, get_order(size));
+       void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+       hyp_split_page(hyp_virt_to_page(addr));
+
+       /*
+        * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+        * so there should be no need to free any of the tail pages to make the
+        * allocation exact.
+        */
+       WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+       return addr;
 }
 
 static void *host_s2_zalloc_page(void *pool)
index 41fc25bdfb34660f470ef9dda78fe0a8a7aee989..0bd7701ad1df5d1e71fc58c4d1d9de72e55c7cdf 100644 (file)
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 
 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+       BUG_ON(!p->refcount);
        p->refcount--;
        return (p->refcount == 0);
 }
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
        hyp_spin_unlock(&pool->lock);
 }
 
+void hyp_split_page(struct hyp_page *p)
+{
+       unsigned short order = p->order;
+       unsigned int i;
+
+       p->order = 0;
+       for (i = 1; i < (1 << order); i++) {
+               struct hyp_page *tail = p + i;
+
+               tail->order = 0;
+               hyp_set_page_refcounted(tail);
+       }
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
        unsigned short i = order;
index 1a94a7ca48f27da2d739ad7b2e76eb403cad451b..69bd1732a299f79066c4d667c701e0288f5e2528 100644 (file)
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                 * when updating the PG_mte_tagged page flag, see
                 * sanitise_mte_tags for more details.
                 */
-               if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-                       return -EINVAL;
+               if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                if (vma->vm_flags & VM_PFNMAP) {
                        /* IO region dirty page logging not allowed */
index f9bb3b14130eef9da9e6fe0214c65bc9d23f6861..c84fe24b2ea1e8f1f3a2ab6e1f6e6c9bb46801ed 100644 (file)
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-       if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-               static_branch_enable(&kvm_arm_pmu_available);
-
        return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }
 
index f5065f23b413faf606137b3ceabc1bc81b87a27b..2af3c37445e00899217ac82a6c9e7ddc474b7093 100644 (file)
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+       if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+           !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+               static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
        struct perf_event_attr attr = { };
        struct perf_event *event;
index 23505fc353247019e952a2b760dda82677d5fdfe..a8158c9489666819b5d2a70034c3c645e512bc1f 100644 (file)
@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
 #ifdef CONFIG_ARM64_4K_PAGES
        order = PUD_SHIFT - PAGE_SHIFT;
 #else
-       order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+       order = CONT_PMD_SHIFT - PAGE_SHIFT;
 #endif
        /*
         * HugeTLB CMA reservation is required for gigantic
index 9d4d898df76ba717e7808afc4209e0cc20c4dafc..823d3d5a9e11487c3037fbbc01af0141456c9335 100644 (file)
@@ -8,7 +8,7 @@ config CSKY
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_QUEUED_RWLOCKS
-       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
+       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select COMMON_CLK
        select CLKSRC_MMIO
@@ -241,6 +241,7 @@ endchoice
 
 menuconfig HAVE_TCM
        bool "Tightly-Coupled/Sram Memory"
+       depends on !COMPILE_TEST
        help
          The implementation are not only used by TCM (Tightly-Coupled Meory)
          but also used by sram on SOC bus. It follow existed linux tcm
index 91818787d860925defd3fc71cb3cefe87fe02d17..02b72a0007671308878bae4de47eda256b442324 100644 (file)
@@ -74,7 +74,6 @@ static __always_inline unsigned long __fls(unsigned long x)
  * bug fix, why only could use atomic!!!!
  */
 #include <asm-generic/bitops/non-atomic.h>
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
 
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
index 0105ac81b4328adc090342543cd4601c048aed7d..1a5f54e0d272631137179c848715a7ab8a5791ff 100644 (file)
@@ -99,7 +99,8 @@ static int gpr_set(struct task_struct *target,
        if (ret)
                return ret;
 
-       regs.sr = task_pt_regs(target)->sr;
+       /* BIT(0) of regs.sr is Condition Code/Carry bit */
+       regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
 #ifdef CONFIG_CPU_HAS_HILO
        regs.dcsr = task_pt_regs(target)->dcsr;
 #endif
index bc4238b9f709a8304d1766f1d7e2d3b06d9ac53c..c7b763d2f526e661f16b5ad98c5c56bd4c7c257e 100644 (file)
@@ -52,10 +52,14 @@ static long restore_sigcontext(struct pt_regs *regs,
        struct sigcontext __user *sc)
 {
        int err = 0;
+       unsigned long sr = regs->sr;
 
        /* sc_pt_regs is structured the same as the start of pt_regs */
        err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
 
+       /* BIT(0) of regs->sr is Condition Code/Carry bit */
+       regs->sr = (sr & ~1) | (regs->sr & 1);
+
        /* Restore the floating-point state. */
        err |= restore_fpu_state(sc);
 
index 045792cde4811ef33bbdf7d614d2c53b52397e37..1e33666fa679be428e11ea0e3f0917d53de8e33c 100644 (file)
@@ -388,8 +388,6 @@ config CRASH_DUMP
          help
            Generate crash dump after being started by kexec.
 
-source "drivers/firmware/Kconfig"
-
 endmenu
 
 menu "Power management and ACPI options"
index 259b3661b614168ff8ab377587c66b1478222218..997b549330156dc87bf48de2a2c2435b4fdc62d2 100644 (file)
@@ -15,7 +15,6 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -25,7 +24,6 @@
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl bad_interrupt
 .globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_leave
-
-ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     ret_from_exception
index 774c35f47eeab5affbfba83c375452165cae16ba..0b50da08a9c56fc691aa1e918e5b641ae2dc7fae 100644 (file)
@@ -29,7 +29,6 @@ config M68K
        select NO_DMA if !MMU && !COLDFIRE
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
-       select SET_FS
        select UACCESS_MEMCPY if !MMU
        select VIRT_TO_BUS
        select ZONE_DMA
index d43a02795a4a445e18efea2d5eb386534ae8816e..9f337c70243a39f75828675170028f2ed2c18980 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
 
@@ -51,7 +50,6 @@ sw_usp:
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
 
@@ -98,8 +96,6 @@ ENTRY(system_call)
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_leave
-
-ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
 
index 3750819ac5a13bde46a73d0b9ae6af21dd5aa42f..f4d82c619a5c48ec7b3a87df334089c0e44873e6 100644 (file)
@@ -9,7 +9,6 @@
 #define __ASM_M68K_PROCESSOR_H
 
 #include <linux/thread_info.h>
-#include <asm/segment.h>
 #include <asm/fpu.h>
 #include <asm/ptrace.h>
 
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
 #define TASK_UNMAPPED_BASE     0
 #endif
 
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA     1
+#define USER_PROGRAM  2
+#define SUPER_DATA    5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE     7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations.  For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+       WARN_ON_ONCE(in_interrupt());
+
+       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+                             "movec %0,%/dfc\n\t"
+                             : /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
 struct thread_struct {
        unsigned long  ksp;             /* kernel stack pointer */
        unsigned long  usp;             /* user stack pointer */
        unsigned short sr;              /* saved status register */
-       unsigned short fs;              /* saved fs (sfc, dfc) */
+       unsigned short fc;              /* saved fc (sfc, dfc) */
        unsigned long  crp[2];          /* cpu root pointer */
        unsigned long  esp0;            /* points to SR of stack frame */
        unsigned long  faddr;           /* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
 #define INIT_THREAD  {                                                 \
        .ksp    = sizeof(init_stack) + (unsigned long) init_stack,      \
        .sr     = PS_S,                                                 \
-       .fs     = __KERNEL_DS,                                          \
+       .fc     = USER_DATA,                                            \
 }
 
 /*
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
deleted file mode 100644 (file)
index 2b5e68a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_SEGMENT_H
-#define _M68K_SEGMENT_H
-
-/* define constants */
-/* Address spaces (FC0-FC2) */
-#define USER_DATA     (1)
-#ifndef __USER_DS
-#define __USER_DS     (USER_DATA)
-#endif
-#define USER_PROGRAM  (2)
-#define SUPER_DATA    (5)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS   (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (6)
-#define CPU_SPACE     (7)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-#define USER_DS                MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
-
-static inline mm_segment_t get_fs(void)
-{
-       mm_segment_t _v;
-       __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-       return _v;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
-                             "movec %0,%/dfc\n\t"
-                             : /* no outputs */ : "r" (val.seg) : "memory");
-}
-
-#else
-#define USER_DS                MAKE_MM_SEG(TASK_SIZE)
-#define KERNEL_DS      MAKE_MM_SEG(0xFFFFFFFF)
-#define get_fs()       (current_thread_info()->addr_limit)
-#define set_fs(x)      (current_thread_info()->addr_limit = (x))
-#endif
-
-#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _M68K_SEGMENT_H */
index 15a757073fa58ffcab231b5b7f4190479b2d363b..c952658ba79223b028d9dd7baba20d8a3992d119 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <asm/types.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        unsigned long           flags;
-       mm_segment_t            addr_limit;     /* thread address space */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
        __u32                   cpu;            /* should always be 0 on m68k */
        unsigned long           tp_value;       /* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)                  \
 {                                              \
        .task           = &tsk,                 \
-       .addr_limit     = KERNEL_DS,            \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
 
index a6318ccd308fd3ae3bbf3e0a62f05aae7b6ed9d1..b882e2f4f5516f8d67c501596ec22df5c2f92610 100644 (file)
@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
-               mm_segment_t old_fs = get_fs();
-               set_fs(KERNEL_DS);
+               set_fc(SUPER_DATA);
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflush (%0)\n\t"
                                     ".chip 68k"
                                     : : "a" (addr));
-               set_fs(old_fs);
+               set_fc(USER_DATA);
        } else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
 }
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       if (vma->vm_mm == current->active_mm) {
-               mm_segment_t old_fs = force_uaccess_begin();
-
+       if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
-               force_uaccess_end(old_fs);
-       }
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
index 4aff3358fbaff75eb80952b2ea05977845614421..a9d5c1c870d312021ed9bea77ac5075326dc978d 100644 (file)
@@ -267,6 +267,10 @@ struct frame {
     } un;
 };
 
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _M68K_TRAPS_H */
index f98208ccbbcd1a3ed3594f092ff0c53b11a4165f..ba670523885c89ff72352f0147da5534b3773a1b 100644 (file)
@@ -9,13 +9,16 @@
  */
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/segment.h>
 #include <asm/extable.h>
 
 /* We let the MMU do all checking */
 static inline int access_ok(const void __user *addr,
                            unsigned long size)
 {
+       /*
+        * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
+        * for TASK_SIZE!
+        */
        return 1;
 }
 
@@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
 #define        MOVES   "move"
 #endif
 
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err)     \
+#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
 asm volatile ("\n"                                     \
-       "1:     "MOVES"."#bwl"  %2,%1\n"                \
+       "1:     "inst"."#bwl"   %2,%1\n"                \
        "2:\n"                                          \
        "       .section .fixup,\"ax\"\n"               \
        "       .even\n"                                \
@@ -56,6 +56,31 @@ asm volatile ("\n"                                   \
        : "+d" (res), "=m" (*(ptr))                     \
        : #reg (x), "i" (err))
 
+#define __put_user_asm8(inst, res, x, ptr)                     \
+do {                                                           \
+       const void *__pu_ptr = (const void __force *)(ptr);     \
+                                                               \
+       asm volatile ("\n"                                      \
+               "1:     "inst".l %2,(%1)+\n"                    \
+               "2:     "inst".l %R2,(%1)\n"                    \
+               "3:\n"                                          \
+               "       .section .fixup,\"ax\"\n"               \
+               "       .even\n"                                \
+               "10:    movel %3,%0\n"                          \
+               "       jra 3b\n"                               \
+               "       .previous\n"                            \
+               "\n"                                            \
+               "       .section __ex_table,\"a\"\n"            \
+               "       .align 4\n"                             \
+               "       .long 1b,10b\n"                         \
+               "       .long 2b,10b\n"                         \
+               "       .long 3b,10b\n"                         \
+               "       .previous"                              \
+               : "+d" (res), "+a" (__pu_ptr)                   \
+               : "r" (x), "i" (-EFAULT)                        \
+               : "memory");                                    \
+} while (0)
+
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -68,51 +93,29 @@ asm volatile ("\n"                                  \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof (*(ptr))) {                                      \
        case 1:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
                break;                                                  \
        case 8:                                                         \
-           {                                                           \
-               const void __user *__pu_ptr = (ptr);                    \
-               asm volatile ("\n"                                      \
-                       "1:     "MOVES".l       %2,(%1)+\n"             \
-                       "2:     "MOVES".l       %R2,(%1)\n"             \
-                       "3:\n"                                          \
-                       "       .section .fixup,\"ax\"\n"               \
-                       "       .even\n"                                \
-                       "10:    movel %3,%0\n"                          \
-                       "       jra 3b\n"                               \
-                       "       .previous\n"                            \
-                       "\n"                                            \
-                       "       .section __ex_table,\"a\"\n"            \
-                       "       .align 4\n"                             \
-                       "       .long 1b,10b\n"                         \
-                       "       .long 2b,10b\n"                         \
-                       "       .long 3b,10b\n"                         \
-                       "       .previous"                              \
-                       : "+d" (__pu_err), "+a" (__pu_ptr)              \
-                       : "r" (__pu_val), "i" (-EFAULT)                 \
-                       : "memory");                                    \
+               __put_user_asm8(MOVES, __pu_err, __pu_val, ptr);        \
                break;                                                  \
-           }                                                           \
        default:                                                        \
-               __pu_err = __put_user_bad();                            \
-               break;                                                  \
+               BUILD_BUG();                                            \
        }                                                               \
        __pu_err;                                                       \
 })
 #define put_user(x, ptr)       __put_user(x, ptr)
 
 
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({            \
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({      \
        type __gu_val;                                                  \
        asm volatile ("\n"                                              \
-               "1:     "MOVES"."#bwl"  %2,%1\n"                        \
+               "1:     "inst"."#bwl"   %2,%1\n"                        \
                "2:\n"                                                  \
                "       .section .fixup,\"ax\"\n"                       \
                "       .even\n"                                        \
@@ -130,53 +133,57 @@ asm volatile ("\n"                                        \
        (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;  \
 })
 
+#define __get_user_asm8(inst, res, x, ptr)                             \
+do {                                                                   \
+       const void *__gu_ptr = (const void __force *)(ptr);             \
+       union {                                                         \
+               u64 l;                                                  \
+               __typeof__(*(ptr)) t;                                   \
+       } __gu_val;                                                     \
+                                                                       \
+       asm volatile ("\n"                                              \
+               "1:     "inst".l (%2)+,%1\n"                            \
+               "2:     "inst".l (%2),%R1\n"                            \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "10:    move.l  %3,%0\n"                                \
+               "       sub.l   %1,%1\n"                                \
+               "       sub.l   %R1,%R1\n"                              \
+               "       jra     3b\n"                                   \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   1b,10b\n"                               \
+               "       .long   2b,10b\n"                               \
+               "       .previous"                                      \
+               : "+d" (res), "=&r" (__gu_val.l),                       \
+                 "+a" (__gu_ptr)                                       \
+               : "i" (-EFAULT)                                         \
+               : "memory");                                            \
+       (x) = __gu_val.t;                                               \
+} while (0)
+
 #define __get_user(x, ptr)                                             \
 ({                                                                     \
        int __gu_err = 0;                                               \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);    \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);   \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);   \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
                break;                                                  \
-       case 8: {                                                       \
-               const void __user *__gu_ptr = (ptr);                    \
-               union {                                                 \
-                       u64 l;                                          \
-                       __typeof__(*(ptr)) t;                           \
-               } __gu_val;                                             \
-               asm volatile ("\n"                                      \
-                       "1:     "MOVES".l       (%2)+,%1\n"             \
-                       "2:     "MOVES".l       (%2),%R1\n"             \
-                       "3:\n"                                          \
-                       "       .section .fixup,\"ax\"\n"               \
-                       "       .even\n"                                \
-                       "10:    move.l  %3,%0\n"                        \
-                       "       sub.l   %1,%1\n"                        \
-                       "       sub.l   %R1,%R1\n"                      \
-                       "       jra     3b\n"                           \
-                       "       .previous\n"                            \
-                       "\n"                                            \
-                       "       .section __ex_table,\"a\"\n"            \
-                       "       .align  4\n"                            \
-                       "       .long   1b,10b\n"                       \
-                       "       .long   2b,10b\n"                       \
-                       "       .previous"                              \
-                       : "+d" (__gu_err), "=&r" (__gu_val.l),          \
-                         "+a" (__gu_ptr)                               \
-                       : "i" (-EFAULT)                                 \
-                       : "memory");                                    \
-               (x) = __gu_val.t;                                       \
+       case 8:                                                         \
+               __get_user_asm8(MOVES, __gu_err, x, ptr);               \
                break;                                                  \
-       }                                                               \
        default:                                                        \
-               __gu_err = __get_user_bad();                            \
-               break;                                                  \
+               BUILD_BUG();                                            \
        }                                                               \
        __gu_err;                                                       \
 })
@@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 
        switch (n) {
        case 1:
-               __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+               __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
+                               b, d, 1);
                break;
        case 2:
-               __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+               __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
+                               w, r, 2);
                break;
        case 3:
                __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
-               __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+               __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
+                               l, r, 4);
                break;
        case 5:
                __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
-#define user_addr_max() \
-       (uaccess_kernel() ? ~0UL : TASK_SIZE)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       type *__gk_dst = (type *)(dst);                                 \
+       type *__gk_src = (type *)(src);                                 \
+       int __gk_err = 0;                                               \
+                                                                       \
+       switch (sizeof(type)) {                                         \
+       case 1:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u8, b, d, -EFAULT);                     \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u16, w, r, -EFAULT);                    \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u32, l, r, -EFAULT);                    \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
+               break;                                                  \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+       if (unlikely(__gk_err))                                         \
+               goto err_label;                                         \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       type __pk_src = *(type *)(src);                                 \
+       type *__pk_dst = (type *)(dst);                                 \
+       int __pk_err = 0;                                               \
+                                                                       \
+       switch (sizeof(type)) {                                         \
+       case 1:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               b, d, -EFAULT);                         \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               w, r, -EFAULT);                         \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               l, r, -EFAULT);                         \
+               break;                                                  \
+       case 8:                                                         \
+               __put_user_asm8("move", __pk_err, __pk_src, __pk_dst);  \
+               break;                                                  \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+       if (unlikely(__pk_err))                                         \
+               goto err_label;                                         \
+} while (0)
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);
index ccea355052efad8994f9682eed160b9fde6ba57c..906d73230537440fde199e779bd484683a0e8d9d 100644 (file)
@@ -31,7 +31,7 @@ int main(void)
        DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
        DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
        DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-       DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+       DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
        DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
        DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
        DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
index 9dd76fbb7c6b2752b0a8eefb847a65e2f26251b1..9434fca68de5d018b9f301f9b8813f7b6826c667 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/unistd.h>
 #include <asm/asm-offsets.h>
@@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
 
 ENTRY(sys_sigreturn)
        SAVE_SWITCH_STACK
-       movel   %sp,%sp@-                 | switch_stack pointer
-       pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+       movel   %sp,%a1                         | switch_stack pointer
+       lea     %sp@(SWITCH_STACK_SIZE),%a0     | pt_regs pointer
+       lea     %sp@(-84),%sp                   | leave a gap
+       movel   %a1,%sp@-
+       movel   %a0,%sp@-
        jbsr    do_sigreturn
-       addql   #8,%sp
-       RESTORE_SWITCH_STACK
-       rts
+       jra     1f                              | shared with rt_sigreturn()
 
 ENTRY(sys_rt_sigreturn)
        SAVE_SWITCH_STACK
-       movel   %sp,%sp@-                 | switch_stack pointer
-       pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+       movel   %sp,%a1                         | switch_stack pointer
+       lea     %sp@(SWITCH_STACK_SIZE),%a0     | pt_regs pointer
+       lea     %sp@(-84),%sp                   | leave a gap
+       movel   %a1,%sp@-
+       movel   %a0,%sp@-
+       | stack contents:
+       |   [original pt_regs address] [original switch_stack address]
+       |   [gap] [switch_stack] [pt_regs] [exception frame]
        jbsr    do_rt_sigreturn
-       addql   #8,%sp
+
+1:
+       | stack contents now:
+       |   [original pt_regs address] [original switch_stack address]
+       |   [unused part of the gap] [moved switch_stack] [moved pt_regs]
+       |   [replacement exception frame]
+       | return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+       movel   %d0,%sp                         | discard the leftover junk
        RESTORE_SWITCH_STACK
+       | stack contents now is just [syscall return address] [pt_regs] [frame]
+       | return pt_regs.d0
+       movel   %sp@(PT_OFF_D0+4),%d0
        rts
 
 ENTRY(buserr)
@@ -182,25 +199,6 @@ do_trace_exit:
        addql   #4,%sp
        jra     .Lret_from_exception
 
-ENTRY(ret_from_signal)
-       movel   %curptr@(TASK_STACK),%a1
-       tstb    %a1@(TINFO_FLAGS+2)
-       jge     1f
-       jbsr    syscall_trace
-1:     RESTORE_SWITCH_STACK
-       addql   #4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
-       bfextu  %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
-       subql   #7,%d0                          | bus error frame ?
-       jbne    1f
-       movel   %sp,%sp@-
-       jbsr    berr_040cleanup
-       addql   #4,%sp
-1:
-#endif
-       jra     .Lret_from_exception
-
 ENTRY(system_call)
        SAVE_ALL_SYS
 
@@ -338,7 +336,7 @@ resume:
 
        /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
        movec   %sfc,%d0
-       movew   %d0,%a0@(TASK_THREAD+THREAD_FS)
+       movew   %d0,%a0@(TASK_THREAD+THREAD_FC)
 
        /* save usp */
        /* it is better to use a movel here instead of a movew 8*) */
@@ -424,7 +422,7 @@ resume:
        movel   %a0,%usp
 
        /* restore fs (sfc,%dfc) */
-       movew   %a1@(TASK_THREAD+THREAD_FS),%a0
+       movew   %a1@(TASK_THREAD+THREAD_FC),%a0
        movec   %a0,%sfc
        movec   %a0,%dfc
 
index db49f90917112b8d76769050b5ed774e32b9550d..1ab692b952cd6235db7418cdb7b4cf6a83155f4e 100644 (file)
@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
 
 void flush_thread(void)
 {
-       current->thread.fs = __USER_DS;
+       current->thread.fc = USER_DATA;
 #ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
                unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         * Must save the current SFC/DFC value, NOT the value when
         * the parent was last descheduled - RGH  10-08-96
         */
-       p->thread.fs = get_fs().seg;
+       p->thread.fc = USER_DATA;
 
        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                /* kernel thread */
index 8f215e79e70e61c1a69b949aac19a6ff8b5a43d9..338817d0cb3fb100609c1c38dc899d27ff5508ba 100644 (file)
@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 
        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
-               if (CPU_IS_020_OR_030 &&
+               if (CPU_IS_020_OR_030 && !regs->stkadj &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                fpu_version = fpstate[0];
-               if (CPU_IS_020_OR_030 &&
+               if (CPU_IS_020_OR_030 && !regs->stkadj &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
@@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
 static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                               void __user *fp)
 {
-       int fsize = frame_extra_sizes(formatvec >> 12);
-       if (fsize < 0) {
+       int extra = frame_extra_sizes(formatvec >> 12);
+       char buf[sizeof_field(struct frame, un)];
+
+       if (extra < 0) {
                /*
                 * user process trying to return with weird frame format
                 */
                pr_debug("user process returning with weird frame format\n");
-               return 1;
+               return -1;
        }
-       if (!fsize) {
-               regs->format = formatvec >> 12;
-               regs->vector = formatvec & 0xfff;
-       } else {
-               struct switch_stack *sw = (struct switch_stack *)regs - 1;
-               /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
-               unsigned long buf[sizeof_field(struct frame, un) / 2];
-
-               /* that'll make sure that expansion won't crap over data */
-               if (copy_from_user(buf + fsize / 4, fp, fsize))
-                       return 1;
-
-               /* point of no return */
-               regs->format = formatvec >> 12;
-               regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-               __asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
-                        "   movel %0,%/sp\n\t"
-                        "   bra ret_from_signal\n"
-#else
-                        "   movel %0,%/a0\n\t"
-                        "   subl %1,%/a0\n\t"     /* make room on stack */
-                        "   movel %/a0,%/sp\n\t"  /* set stack pointer */
-                        /* move switch_stack and pt_regs */
-                        "1: movel %0@+,%/a0@+\n\t"
-                        "   dbra %2,1b\n\t"
-                        "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
-                        "   lsrl  #2,%1\n\t"
-                        "   subql #1,%1\n\t"
-                        /* copy to the gap we'd made */
-                        "2: movel %4@+,%/a0@+\n\t"
-                        "   dbra %1,2b\n\t"
-                        "   bral ret_from_signal\n"
+       if (extra && copy_from_user(buf, fp, extra))
+               return -1;
+       regs->format = formatvec >> 12;
+       regs->vector = formatvec & 0xfff;
+       if (extra) {
+               void *p = (struct switch_stack *)regs - 1;
+               struct frame *new = (void *)regs - extra;
+               int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
+
+               memmove(p - extra, p, size);
+               memcpy(p - extra + size, buf, extra);
+               current->thread.esp0 = (unsigned long)&new->ptregs;
+#ifdef CONFIG_M68040
+               /* on 68040 complete pending writebacks if any */
+               if (new->ptregs.format == 7) // bus error frame
+                       berr_040cleanup(new);
 #endif
-                        : /* no outputs, it doesn't ever return */
-                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-                          "n" (frame_offset), "a" (buf + fsize/4)
-                        : "a0");
-#undef frame_offset
        }
-       return 0;
+       return extra;
 }
 
 static inline int
@@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
 {
        int formatvec;
        struct sigcontext context;
-       int err = 0;
 
        siginfo_build_tests();
 
@@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
 
        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
-               goto badframe;
+               return -1;
 
        /* restore passed registers */
        regs->d0 = context.sc_d0;
@@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;
 
-       err = restore_fpu_state(&context);
+       if (restore_fpu_state(&context))
+               return -1;
 
-       if (err || mangle_kernel_stack(regs, formatvec, fp))
-               goto badframe;
-
-       return 0;
-
-badframe:
-       return 1;
+       return mangle_kernel_stack(regs, formatvec, fp);
 }
 
 static inline int
@@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
 
        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
-               goto badframe;
+               return -1;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
@@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
        err |= restore_altstack(&uc->uc_stack);
 
        if (err)
-               goto badframe;
+               return -1;
 
-       if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
-               goto badframe;
-
-       return 0;
-
-badframe:
-       return 1;
+       return mangle_kernel_stack(regs, temp, &uc->uc_extra);
 }
 
-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
        unsigned long usp = rdusp();
        struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;
+       int size;
 
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
@@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 
        set_current_blocked(&set);
 
-       if (restore_sigcontext(regs, &frame->sc, frame + 1))
+       size = restore_sigcontext(regs, &frame->sc, frame + 1);
+       if (size < 0)
                goto badframe;
-       return regs->d0;
+       return (void *)sw - size;
 
 badframe:
        force_sig(SIGSEGV);
-       return 0;
+       return sw;
 }
 
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
        unsigned long usp = rdusp();
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;
+       int size;
 
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
@@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 
        set_current_blocked(&set);
 
-       if (rt_restore_ucontext(regs, sw, &frame->uc))
+       size = rt_restore_ucontext(regs, sw, &frame->uc);
+       if (size < 0)
                goto badframe;
-       return regs->d0;
+       return (void *)sw - size;
 
 badframe:
        force_sig(SIGSEGV);
-       return 0;
+       return sw;
+}
+
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+       return (void *)regs + regs->stkadj;
 }
 
 static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
 {
+       struct pt_regs *tregs = rte_regs(regs);
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
-       sc->sc_sr = regs->sr;
-       sc->sc_pc = regs->pc;
-       sc->sc_formatvec = regs->format << 12 | regs->vector;
+       sc->sc_sr = tregs->sr;
+       sc->sc_pc = tregs->pc;
+       sc->sc_formatvec = tregs->format << 12 | tregs->vector;
        save_a5_state(sc, regs);
        save_fpu_state(sc, regs);
 }
@@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
+       struct pt_regs *tregs = rte_regs(regs);
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;
 
@@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
-       err |= __put_user(regs->pc, &gregs[16]);
-       err |= __put_user(regs->sr, &gregs[17]);
-       err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+       err |= __put_user(tregs->pc, &gregs[16]);
+       err |= __put_user(tregs->sr, &gregs[17]);
+       err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
        err |= rt_save_fpu_state(uc, regs);
        return err;
 }
@@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
                        struct pt_regs *regs)
 {
        struct sigframe __user *frame;
-       int fsize = frame_extra_sizes(regs->format);
+       struct pt_regs *tregs = rte_regs(regs);
+       int fsize = frame_extra_sizes(tregs->format);
        struct sigcontext context;
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
                pr_debug("setup_frame: Unknown frame format %#x\n",
-                        regs->format);
+                        tregs->format);
                return -EFAULT;
        }
 
@@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
        err |= __put_user(sig, &frame->sig);
 
-       err |= __put_user(regs->vector, &frame->code);
+       err |= __put_user(tregs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);
 
        if (_NSIG_WORDS > 1)
@@ -933,34 +912,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
        push_cache ((unsigned long) &frame->retcode);
 
-       /*
-        * Set up registers for signal handler.  All the state we are about
-        * to destroy is successfully copied to sigframe.
-        */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-       adjustformat(regs);
-
        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
-       if (fsize)
+       if (fsize) {
                regs->stkadj = fsize;
-
-       /* Prepare to skip over the extra stuff in the exception frame.  */
-       if (regs->stkadj) {
-               struct pt_regs *tregs =
-                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+               tregs = rte_regs(regs);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-               /* This must be copied with decreasing addresses to
-                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
-               tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
+
+       /*
+        * Set up registers for signal handler.  All the state we are about
+        * to destroy is successfully copied to sigframe.
+        */
+       wrusp ((unsigned long) frame);
+       tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+       adjustformat(regs);
+
        return 0;
 }
 
@@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                           struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
-       int fsize = frame_extra_sizes(regs->format);
+       struct pt_regs *tregs = rte_regs(regs);
+       int fsize = frame_extra_sizes(tregs->format);
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
@@ -1018,34 +992,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
        push_cache ((unsigned long) &frame->retcode);
 
-       /*
-        * Set up registers for signal handler.  All the state we are about
-        * to destroy is successfully copied to sigframe.
-        */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-       adjustformat(regs);
-
        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
-       if (fsize)
+       if (fsize) {
                regs->stkadj = fsize;
-
-       /* Prepare to skip over the extra stuff in the exception frame.  */
-       if (regs->stkadj) {
-               struct pt_regs *tregs =
-                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+               tregs = rte_regs(regs);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-               /* This must be copied with decreasing addresses to
-                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
-               tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
+
+       /*
+        * Set up registers for signal handler.  All the state we are about
+        * to destroy is successfully copied to sigframe.
+        */
+       wrusp ((unsigned long) frame);
+       tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+       adjustformat(regs);
        return 0;
 }
 
index 5b19fcdcd69e96e31cd3db25bce50f5ea0a227b0..9718ce94cc845ab36c393b6dda9a349247bc6da3 100644 (file)
@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 {
        unsigned long mmusr;
-       mm_segment_t old_fs = get_fs();
 
-       set_fs(MAKE_MM_SEG(wbs));
+       set_fc(wbs);
 
        if (iswrite)
                asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 
        asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
 
-       set_fs(old_fs);
+       set_fc(USER_DATA);
 
        return mmusr;
 }
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
                                   unsigned long wbd)
 {
        int res = 0;
-       mm_segment_t old_fs = get_fs();
 
-       /* set_fs can not be moved, otherwise put_user() may oops */
-       set_fs(MAKE_MM_SEG(wbs));
+       set_fc(wbs);
 
        switch (wbs & WBSIZ_040) {
        case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
                break;
        }
 
-       /* set_fs can not be moved, otherwise put_user() may oops */
-       set_fs(old_fs);
-
+       set_fc(USER_DATA);
 
        pr_debug("do_040writeback1, res=%d\n", res);
 
index 90f4e9ca1276b5039ff95f9487bd3c996f6d5e00..4fab3479175865d47b579a965b1249fac4dbc4c3 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <linux/uaccess.h>
 #include <asm/io.h>
-#include <asm/segment.h>
 #include <asm/setup.h>
 #include <asm/macintosh.h>
 #include <asm/mac_via.h>
index b486c0889eece6e81c597c3a8f30ea80e437d086..dde978e66f14fb311740b2f9b009a8d36e86e109 100644 (file)
@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
                if (mmusr & MMU_R_040)
                        return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
        } else {
-               unsigned short mmusr;
-               unsigned long *descaddr;
-
-               asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-                             "pmove %%psr,%1"
-                             : "=a&" (descaddr), "=m" (mmusr)
-                             : "a" (vaddr), "d" (get_fs().seg));
-               if (mmusr & (MMU_I|MMU_B|MMU_L))
-                       return 0;
-               descaddr = phys_to_virt((unsigned long)descaddr);
-               switch (mmusr & MMU_NUM) {
-               case 1:
-                       return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-               case 2:
-                       return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-               case 3:
-                       return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-               }
+               WARN_ON_ONCE(!CPU_IS_040_OR_060);
        }
        return 0;
 }
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
 
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-       mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
+       set_fc(SUPER_DATA);
        flush_icache_user_range(address, endaddr);
-       set_fs(old_fs);
+       set_fc(USER_DATA);
 }
 EXPORT_SYMBOL(flush_icache_range);
 
index 5d749e188246d736e2eda5d393dc22b3449e4c7b..1b47bec15832066542b6cfa53e3e46489459c147 100644 (file)
@@ -72,12 +72,6 @@ void __init paging_init(void)
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);
-
-       /*
-        * Set up SFC/DFC registers (user data space).
-        */
-       set_fs (USER_DS);
-
        max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
        free_area_init(max_zone_pfn);
 }
index 1269d513b2217f84e725e8b6cda8503db104ee83..20ddf71b43d05a2c42819acbce516d2bc5a9444a 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/vmalloc.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
index fe75aecfb238a3c7f0fdf24733ce1836cdb8de92..c2c03b0a1567f9d37c2fe417b19d5e9719ddb8b3 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
index 3a653f0a4188d4af82046e8ff4283ff916c43c41..9f3f77785aa78a013ba26b31da01d12a2d5d645f 100644 (file)
@@ -467,7 +467,7 @@ void __init paging_init(void)
        /*
         * Set up SFC/DFC registers
         */
-       set_fs(KERNEL_DS);
+       set_fc(USER_DATA);
 
 #ifdef DEBUG
        printk ("before free_area_init\n");
index f7dd47232b6ca9207fc2e1ed28dfefecd18671aa..203f428a0344a73534ecd1ddc2068f108c282730 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/intersil.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 
 char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@@ -89,7 +88,7 @@ void __init sun3_init(void)
        sun3_reserved_pmeg[249] = 1;
        sun3_reserved_pmeg[252] = 1;
        sun3_reserved_pmeg[253] = 1;
-       set_fs(KERNEL_DS);
+       set_fc(USER_DATA);
 }
 
 /* Without this, Bad Things happen when something calls arch_reset. */
index 7aa879b7c7ff57423ff792a6275807e5e725c34d..7ec20817c0c9eaffe487a5347aecd1c280866b03 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/uaccess.h>
 #include <asm/page.h>
 #include <asm/sun3mmu.h>
-#include <asm/segment.h>
 #include <asm/oplib.h>
 #include <asm/mmu_context.h>
 #include <asm/dvma.h>
@@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
        for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
                sun3_put_segmap(seg, SUN3_INVALID_PMEG);
 
-       set_fs(MAKE_MM_SEG(3));
+       set_fc(3);
        for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
                i = sun3_get_segmap(seg);
                for(j = 1; j < CONTEXTS_NUM; j++)
                        (*(romvec->pv_setctxt))(j, (void *)seg, i);
        }
-       set_fs(KERNEL_DS);
-
+       set_fc(USER_DATA);
 }
 
 /* erase the mappings for a dead context.  Uses the pg_dir for hints
index 41ae422119d32826c92b6d7bcd35608bd9735176..36cc280a4505f5d2456b6c917c8c4eb03225a859 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/segment.h>
 #include <asm/intersil.h>
 #include <asm/oplib.h>
 #include <asm/sun3ints.h>
index 74d2fe57524b36694d2e6c5d08e7d8ff5f2ee8c0..64c23bfaa90c5ba9006002c6e91969c776b6bf0c 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/traps.h>
 #include <asm/sun3xprom.h>
 #include <asm/idprom.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 #include <asm/openprom.h>
 #include <asm/machines.h>
index 771ca53af06d2643a1515bbccfd20f72faf7ee29..6b8f591c5054ca49ea1886d54e855d2bfafb171d 100644 (file)
@@ -3316,8 +3316,6 @@ source "drivers/cpuidle/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "arch/mips/kvm/Kconfig"
 
 source "arch/mips/vdso/Kconfig"
index 35fb8ee6dd33ec27262680bc1a0b245ffd285a78..fd43d876892ec4c63227f4c788d80daa20b3c542 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/io.h>
 #include <linux/types.h>
 
-#include <asm/mips-boards/launch.h>
-
 extern unsigned long __cps_access_bad_size(void)
        __compiletime_error("Bad size for CPS accessor");
 
@@ -167,30 +165,11 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
  */
 static inline unsigned int mips_cps_numcores(unsigned int cluster)
 {
-       unsigned int ncores;
-
        if (!mips_cm_present())
                return 0;
 
        /* Add one before masking to handle 0xff indicating no cores */
-       ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
-
-       if (IS_ENABLED(CONFIG_SOC_MT7621)) {
-               struct cpulaunch *launch;
-
-               /*
-                * Ralink MT7621S SoC is single core, but the GCR_CONFIG method
-                * always reports 2 cores. Check the second core's LAUNCH_FREADY
-                * flag to detect if the second core is missing. This method
-                * only works before the core has been started.
-                */
-               launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
-               launch += 2; /* MT7621 has 2 VPEs per core */
-               if (!(launch->flags & LAUNCH_FREADY))
-                       ncores = 1;
-       }
-
-       return ncores;
+       return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
 }
 
 /**
index 0af88622c619253d1a5284739c709d24af32f9a7..cb6d22439f71b0e3460b5e1b40bad9459f33bc71 100644 (file)
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
         func##_positive)
 
+static bool is_bad_offset(int b_off)
+{
+       return b_off > 0x1ffff || b_off < -0x20000;
+}
+
 static int build_body(struct jit_ctx *ctx)
 {
        const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ load_common:
                        /* Load return register on DS for failures */
                        emit_reg_move(r_ret, r_zero, ctx);
                        /* Return with error */
-                       emit_b(b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
                case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ load_ind:
                        emit_jalr(MIPS_R_RA, r_s0, ctx);
                        emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
                        /* Check the error value */
-                       emit_bcond(MIPS_COND_NE, r_ret, 0,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        /* We are good */
                        /* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
                        /* A /= X */
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
-                       emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        emit_div(r_A, r_X, ctx);
                        break;
@@ -864,8 +876,10 @@ load_ind:
                        /* A %= X */
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
-                       emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        emit_mod(r_A, r_X, ctx);
                        break;
@@ -926,7 +940,10 @@ load_ind:
                        break;
                case BPF_JMP | BPF_JA:
                        /* pc += K */
-                       emit_b(b_imm(i + k + 1, ctx), ctx);
+                       b_off = b_imm(i + k + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
                        break;
                case BPF_RET | BPF_A:
                        ctx->flags |= SEEN_A;
-                       if (i != prog->len - 1)
+                       if (i != prog->len - 1) {
                                /*
                                 * If this is not the last instruction
                                 * then jump to the epilogue
                                 */
-                               emit_b(b_imm(prog->len, ctx), ctx);
+                               b_off = b_imm(prog->len, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_b(b_off, ctx);
+                       }
                        emit_reg_move(r_ret, r_A, ctx); /* delay slot */
                        break;
                case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
                                 * If this is not the last instruction
                                 * then jump to the epilogue
                                 */
-                               emit_b(b_imm(prog->len, ctx), ctx);
+                               b_off = b_imm(prog->len, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_b(b_off, ctx);
                                emit_nop(ctx);
                        }
                        break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
                        /* Load *dev pointer */
                        emit_load_ptr(r_s0, r_skb, off, ctx);
                        /* error (0) in the delay slot */
-                       emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
        /* Generate the actual JIT code */
        build_prologue(&ctx);
-       build_body(&ctx);
+       if (build_body(&ctx)) {
+               module_memfree(ctx.target);
+               goto out;
+       }
        build_epilogue(&ctx);
 
        /* Update the icache */
index 0e23e3a8df6b556e8331c03e3118be4c2c31e7a1..d55b73b18149e138f6feb68587bb5f846b1d4476 100644 (file)
@@ -6,7 +6,7 @@
 
 #ifndef CONFIG_DYNAMIC_FTRACE
 extern void (*ftrace_trace_function)(unsigned long, unsigned long,
-                                    struct ftrace_ops*, struct pt_regs*);
+                                    struct ftrace_ops*, struct ftrace_regs*);
 extern void ftrace_graph_caller(void);
 
 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
index a8bc06e96ef58d6e84ac37b1cc28f2fdf9ef8a6d..ca1beb87f987c6e4161892a67fa8bd13a00f6252 100644 (file)
@@ -3,9 +3,10 @@
 config EARLY_PRINTK
        bool "Activate early kernel debugging"
        default y
+       depends on TTY
        select SERIAL_CORE_CONSOLE
        help
-         Enable early printk on console
+         Enable early printk on console.
          This is useful for kernel debugging when your machine crashes very
          early before the console code is initialized.
          You should normally say N here, unless you want to debug such a crash.
index b3ec3e510706d5560e9513b53f1576d7f3cfb6d5..25acf27862f9155a7bff299e1544e22943b07a58 100644 (file)
@@ -9,7 +9,7 @@
 
 static inline unsigned long arch_local_save_flags(void)
 {
-       return RDCTL(CTL_STATUS);
+       return RDCTL(CTL_FSTATUS);
 }
 
 /*
@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       WRCTL(CTL_STATUS, flags);
+       WRCTL(CTL_FSTATUS, flags);
 }
 
 static inline void arch_local_irq_disable(void)
index 183c720e454d915ad4690aa4e9184e4bf8f17270..95b67dd16f818866fb466ec27830371f3e2fcd00 100644 (file)
@@ -11,7 +11,7 @@
 #endif
 
 /* control register numbers */
-#define CTL_STATUS     0
+#define CTL_FSTATUS    0
 #define CTL_ESTATUS    1
 #define CTL_BSTATUS    2
 #define CTL_IENABLE    3
index cf8d687a2644a419663f7528c03ea06238a89a81..40bc8fb75e0b50fbb6fcc026169537e908d12390 100644 (file)
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
 
 void __init setup_arch(char **cmdline_p)
 {
-       int dram_start;
-
        console_verbose();
 
        memory_start = memblock_start_of_DRAM();
index 9e32fb7f3d4ce92f6e9c9876fcf1891354af99f2..e849daff6fd1622563abbcea7920de85613bec18 100644 (file)
@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
 
 config NIOS2_DTB_SOURCE_BOOL
        bool "Compile and link device tree into kernel image"
+       depends on !COMPILE_TEST
        help
          This allows you to specify a dts (device tree source) file
          which will be compiled and linked into the kernel image.
index 4742b6f169b7202f5669c996a9841d0cef0f3cab..27a8b49af11fc9612e10623bf250b415bd6f5552 100644 (file)
@@ -384,6 +384,4 @@ config KEXEC_FILE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/parisc/Kconfig"
index 5ba6fbfca2742b9d8f8f62a96e6a2874f201eaed..f82f85c65964cc477ad3b3b8a965e52ef1d490d0 100644 (file)
 
                        fm1mac3: ethernet@e4000 {
                                phy-handle = <&sgmii_aqr_phy3>;
-                               phy-connection-type = "sgmii-2500";
+                               phy-connection-type = "2500base-x";
                                sleep = <&rcpm 0x20000000>;
                        };
 
index d4b145b279f6c51bc2d644720af92db03e488350..9f38040f0641dc1b87e036f77b961961b21eab80 100644 (file)
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
        if (kuap_is_disabled())
                return;
 
+       if (unlikely(kuap != KUAP_NONE)) {
+               current->thread.kuap = KUAP_NONE;
+               kuap_lock(kuap, false);
+       }
+
+       if (likely(regs->kuap == KUAP_NONE))
+               return;
+
        current->thread.kuap = regs->kuap;
 
        kuap_unlock(regs->kuap, false);
index a95f63788c6b1423e957f7e89871987f541b62b1..4ba834599c4d4c68bea72c13dd2a07f3a0db3c91 100644 (file)
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE        0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
                  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
index 6b800d3e2681f6082a163b24d63a2be06f2f275d..a1d238255f077df9391dd606e22c71d44cdb0bbf 100644 (file)
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-       if (is_implicit_soft_masked(regs)) {
-               // Adjust regs->softe soft implicit soft-mask, so
-               // arch_irq_disabled_regs(regs) behaves as expected.
+       if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+               /*
+                * Adjust regs->softe to be soft-masked if it had not been
+                * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
+                * not yet set disabled), or if it was in an implicit soft
+                * masked state. This makes arch_irq_disabled_regs(regs)
+                * behave as expected.
+                */
                regs->softe = IRQS_ALL_DISABLED;
        }
-       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-               BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
        /* Don't do any per-CPU operations until interrupt state is fixed */
 
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
index 792eefaf230b80b52aaac7d54ae5ca71e2ced11c..27574f218b371f3f42ce98bb69821a1eaa723fe7 100644 (file)
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
        return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
index 111249fd619de8692323156c32063b2c51b2b7bc..038ce8d9061d166b8c85663adc2fa4dab08c23f1 100644 (file)
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;
 
+       if (dev_is_pci(dev)) {
+               u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+               if (dma_iommu_dma_supported(dev, bypass_mask)) {
+                       dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+                       return bypass_mask;
+               }
+       }
+
        if (!tbl)
                return 0;
 
index 37859e62a8dcba2af7ecebc5f158127121844063..eaf1f72131a18f8f6a376c332619ab0e00ecda7a 100644 (file)
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
        li      r10,MSR_RI
        mtmsrd  r10,1
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_exception
+       bl      machine_check_exception_async
        b       interrupt_return_srr
 
 
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        subi    r12,r12,1
        sth     r12,PACA_IN_MCE(r13)
 
-       /* Invoke machine_check_exception to print MCE event and panic. */
+       /*
+        * Invoke machine_check_exception to print MCE event and panic.
+        * This is the NMI version of the handler because we are called from
+        * the early handler which is a true NMI.
+        */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      machine_check_exception
 
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
         */
 
        andi.   r10,r12,MSR_PR
-       bne     2f                      /* If userspace, go normal path */
+       bne     .Lnormal_stack          /* If userspace, go normal path */
 
        andis.  r10,r12,(SRR1_PROGTM)@h
-       bne     1f                      /* If TM, emergency             */
+       bne     .Lemergency_stack       /* If TM, emergency             */
 
        cmpdi   r1,-INT_FRAME_SIZE      /* check if r1 is in userspace  */
-       blt     2f                      /* normal path if not           */
+       blt     .Lnormal_stack          /* normal path if not           */
 
        /* Use the emergency stack                                      */
-1:     andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
+.Lemergency_stack:
+       andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
                                        /* 3 in EXCEPTION_PROLOG_COMMON */
        mr      r10,r1                  /* Save r1                      */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
        __ISTACK(program_check)=0
        __GEN_COMMON_BODY program_check
-       b 3f
-2:
+       b .Ldo_program_check
+
+.Lnormal_stack:
        __ISTACK(program_check)=1
        __GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
index abb719b21cae7214be8d98d5a229ba10d21ce924..3d97fb833834d68a80c84ea7b3c54bf420862863 100644 (file)
@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
 /*
  * This is the sequence required to execute idle instructions, as
  * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
- *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
  */
 #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)                  \
        /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
-       std     r2,0(r1);                                       \
+       std     r2,-8(r1);                                      \
        ptesync;                                                \
-       ld      r2,0(r1);                                       \
+       ld      r2,-8(r1);                                      \
 236:   cmpd    cr0,r2,r2;                                      \
        bne     236b;                                           \
        IDLE_INST;                                              \
index 551b653228c47fff328a35a0cc24eba8973899df..c4f1d6b7d99235c350d9f9cdbb706f7834ce9f23 100644 (file)
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
                return;
        }
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * After the stb, interrupts are unmasked and there are no interrupts
         * pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
        if (mask)
                return;
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
index 1a998490fe60f04555c1a95422eb47a6d7c0e39a..15fb5ea1b9eafa7eae3a2e2eb0ba8cc61909b3a5 100644 (file)
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+       return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
index 9cc7d3dbf4392f2b842eb96df38d7918bcc99768..605bab448f847dc774c4026b02e83c423d65f894 100644 (file)
@@ -1730,8 +1730,6 @@ void __cpu_die(unsigned int cpu)
 
 void arch_cpu_idle_dead(void)
 {
-       sched_preempt_enable_no_resched();
-
        /*
         * Disable on the down path. This will be re-enabled by
         * start_secondary() via start_secondary_resume() below
index aac8c0412ff9f0829b79634a69c7f8a63cc5e2ef..11741703d26e0719b7b2d7488ebb54203fa2026d 100644 (file)
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
                return false;
        }
 
-       show_signal_msg(signr, regs, code, addr);
+       /*
+        * Must not enable interrupts even for user-mode exception, because
+        * this can be called from machine check, which may be a NMI or IRQ
+        * which don't like interrupts being enabled. Could check for
+        * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+        * reason why _exception() should enable irqs for an exception handler,
+        * the handlers themselves do that directly.
+        */
 
-       if (arch_irqs_disabled())
-               interrupt_cond_local_irq_enable(regs);
+       show_signal_msg(signr, regs, code, addr);
 
        current->thread.trap_nr = code;
 
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
         * do_exit() checks for in_interrupt() and panics in that case, so
         * exit the irq/nmi before calling die.
         */
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-               irq_exit();
-       else
+       if (in_nmi())
                nmi_exit();
+       else
+               irq_exit();
        die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
        int recover = 0;
 
@@ -841,12 +845,19 @@ bail:
        /* Must die if the interrupt is not recoverable */
        if (regs_is_unrecoverable(regs))
                die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       return;
-#else
-       return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+       __machine_check_exception(regs);
+}
 #endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+       __machine_check_exception(regs);
+
+       return 0;
 }
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
index 90484425a1e6b283ddaa4a58383c65f37ffaf595..eb776d0c5d8e97115714e79ffdfcd9b8de177576 100644 (file)
@@ -255,13 +255,16 @@ kvm_novcpu_exit:
  * r3 contains the SRR1 wakeup value, SRR1 is trashed.
  */
 _GLOBAL(idle_kvm_start_guest)
-       ld      r4,PACAEMERGSP(r13)
        mfcr    r5
        mflr    r0
-       std     r1,0(r4)
-       std     r5,8(r4)
-       std     r0,16(r4)
-       subi    r1,r4,STACK_FRAME_OVERHEAD
+       std     r5, 8(r1)       // Save CR in caller's frame
+       std     r0, 16(r1)      // Save LR in caller's frame
+       // Create frame on emergency stack
+       ld      r4, PACAEMERGSP(r13)
+       stdu    r1, -SWITCH_FRAME_SIZE(r4)
+       // Switch to new frame on emergency stack
+       mr      r1, r4
+       std     r3, 32(r1)      // Save SRR1 wakeup value
        SAVE_NVGPRS(r1)
 
        /*
@@ -313,6 +316,10 @@ kvm_unsplit_wakeup:
 
 kvm_secondary_got_guest:
 
+       // About to go to guest, clear saved SRR1
+       li      r0, 0
+       std     r0, 32(r1)
+
        /* Set HSTATE_DSCR(r13) to something sensible */
        ld      r6, PACA_DSCR_DEFAULT(r13)
        std     r6, HSTATE_DSCR(r13)
@@ -392,13 +399,12 @@ kvm_no_guest:
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR, r4
-       /* set up r3 for return */
-       mfspr   r3,SPRN_SRR1
+       // Return SRR1 wakeup value, or 0 if we went into the guest
+       ld      r3, 32(r1)
        REST_NVGPRS(r1)
-       addi    r1, r1, STACK_FRAME_OVERHEAD
-       ld      r0, 16(r1)
-       ld      r5, 8(r1)
-       ld      r1, 0(r1)
+       ld      r1, 0(r1)       // Switch back to caller stack
+       ld      r0, 16(r1)      // Reload LR
+       ld      r5, 8(r1)       // Reload CR
        mtlr    r0
        mtcr    r5
        blr
index f9a3019e37b43cfffb66e23f882d6314dfd2ae39..c5ed9882383521ec65c30a96cbc09037e9624ae3 100644 (file)
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
        return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+       return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
                offset = offset - (unsigned long)addr;
 
        /* Check we can represent the target in the instruction format */
-       if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+       if (!is_offset_in_cond_branch_range(offset))
                return 1;
 
        /* Mask out the flags and target, so they don't step on each other. */
index 99fad093f43ec105775d1983edfa85a37e0d1f5c..7e9b978b768ed967c0ed1ae1f32e6b56cb6e76d6 100644 (file)
 #define EMIT(instr)            PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)          EMIT(PPC_INST_BRANCH |                        \
-                                    (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)                                                        \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_branch_range(offset)) {                     \
+                       pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);                       \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));                \
+       } while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)       EMIT(PPC_INST_BL |                            \
                                     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)      EMIT(PPC_INST_BRANCH_COND |           \
-                                            (((cond) & 0x3ff) << 16) |       \
-                                            (((dest) - (ctx->idx * 4)) &     \
-                                             0xfffc))
+#define PPC_BCC_SHORT(cond, dest)                                            \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_cond_branch_range(offset)) {                \
+                       pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);           \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));                                      \
+       } while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)         do {                                          \
                if ((int)(uintptr_t)(i) >= -32768 &&                          \
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-       return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP.         If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest)    do {                                          \
-               if (is_nearbranch((dest) - (ctx->idx * 4))) {                 \
+               if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {    \
                        PPC_BCC_SHORT(cond, dest);                            \
                        EMIT(PPC_RAW_NOP());                                  \
                } else {                                                      \
index 7b713edfa7e2617a9681a135233f078377b90e43..b63b35e45e558cbbff353893121171c69b1c925a 100644 (file)
  * with our redzone usage.
  *
  *             [       prev sp         ] <-------------
- *             [   nv gpr save area    ] 6*8           |
+ *             [   nv gpr save area    ] 5*8           |
  *             [    tail_call_cnt      ] 8             |
- *             [    local_tmp_var      ]             |
+ *             [    local_tmp_var      ] 16            |
  * fp (r31) -->        [   ebpf stack space    ] upto 512      |
  *             [     frame header      ] 32/112        |
  * sp (r1) --->        [    stack pointer      ] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE     (6*8)
+#define BPF_PPC_STACK_SAVE     (5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS   16
+#define BPF_PPC_STACK_LOCALS   24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME     (STACK_FRAME_MIN_SIZE + \
                                 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
index 53aefee3fe70be6526699687ad7136e9e69b0c38..fcbf7a917c566e5f73b24da3770e30d871f4182c 100644 (file)
@@ -210,7 +210,11 @@ skip_init_ctx:
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(code_base, &cgctx);
-               bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+               if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+                       bpf_jit_binary_free(bpf_hdr);
+                       fp = org_fp;
+                       goto out_addrs;
+               }
                bpf_jit_build_epilogue(code_base, &cgctx);
 
                if (bpf_jit_enable > 1)
index beb12cbc8c29940993725ad04e3d575a91f135cd..0da31d41d4131068fa5543cbe0dbdd30051f13fb 100644 (file)
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        }
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                                PPC_LI32(_R0, imm);
                                EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
                        }
-                       if (imm >= 0)
+                       if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
                                EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
                        else
                                EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
                        break;
                case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
-                       EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+                       EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
                        break;
                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
                        bpf_set_seen_register(ctx, tmp_reg);
@@ -1073,7 +1075,7 @@ cond_branch:
                                break;
                        case BPF_JMP32 | BPF_JSET | BPF_K:
                                /* andi does not sign-extend the immediate */
-                               if (imm >= -32768 && imm < 32768) {
+                               if (imm >= 0 && imm < 32768) {
                                        /* PPC_ANDI is _only/always_ dot-form */
                                        EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
                                } else {
@@ -1090,7 +1092,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
@@ -1103,7 +1107,7 @@ cond_branch:
                        return -EOPNOTSUPP;
                }
                if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
-                   !insn_is_zext(&insn[i + 1]))
+                   !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
        }
 
index b87a63dba9c8fb5129898c078b17d1c9eb3406c1..8b5157ccfebae55e9a6e0e2e42a05497d661be38 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *             [       prev sp         ] <-------------
  *             [         ...           ]               |
  * sp (r1) --->        [    stack pointer      ] --------------
- *             [   nv gpr save area    ] 6*8
+ *             [   nv gpr save area    ] 5*8
  *             [    tail_call_cnt      ] 8
- *             [    local_tmp_var      ] 8
+ *             [    local_tmp_var      ] 16
  *             [   unused red zone     ] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
        if (bpf_has_stack_frame(ctx))
                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
        else
-               return -(BPF_PPC_STACK_SAVE + 16);
+               return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-       return bpf_jit_stack_local(ctx) + 8;
+       return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"              .global bpf_stf_barrier         ;"
+"      bpf_stf_barrier:                        ;"
+"              std     21,-64(1)               ;"
+"              std     22,-56(1)               ;"
+"              sync                            ;"
+"              ld      21,-64(1)               ;"
+"              ld      22,-56(1)               ;"
+"              ori     31,31,0                 ;"
+"              .rept 14                        ;"
+"              b       1f                      ;"
+"      1:                                      ;"
+"              .endr                           ;"
+"              blr                             ;"
+);
+
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
                       u32 *addrs, bool extra_pass)
 {
+       enum stf_barrier_type stf_barrier = stf_barrier_type_get();
        const struct bpf_insn *insn = fp->insnsi;
        int flen = fp->len;
        int i, ret;
@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm >= -32768 && imm < 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+                       }
+                       goto bpf_alu32_trunc;
+               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-                       if (BPF_OP(code) == BPF_SUB)
-                               imm = -imm;
-                       if (imm) {
-                               if (imm >= -32768 && imm < 32768)
-                                       EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-                               else {
-                                       PPC_LI32(b2p[TMP_REG_1], imm);
-                                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-                               }
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm > -32768 && imm <= 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
                        if (imm == 0)
                                return -EINVAL;
-                       else if (imm == 1)
-                               goto bpf_alu32_trunc;
+                       if (imm == 1) {
+                               if (BPF_OP(code) == BPF_DIV) {
+                                       goto bpf_alu32_trunc;
+                               } else {
+                                       EMIT(PPC_RAW_LI(dst_reg, 0));
+                                       break;
+                               }
+                       }
 
                        PPC_LI32(b2p[TMP_REG_1], imm);
                        switch (BPF_CLASS(code)) {
@@ -631,6 +670,29 @@ emit_clear:
                 * BPF_ST NOSPEC (speculation barrier)
                 */
                case BPF_ST | BPF_NOSPEC:
+                       if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+                                       !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+                               break;
+
+                       switch (stf_barrier) {
+                       case STF_BARRIER_EIEIO:
+                               EMIT(PPC_RAW_EIEIO() | 0x02000000);
+                               break;
+                       case STF_BARRIER_SYNC_ORI:
+                               EMIT(PPC_RAW_SYNC());
+                               EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+                               EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+                               break;
+                       case STF_BARRIER_FALLBACK:
+                               EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+                               PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+                               EMIT(PPC_RAW_MTCTR(12));
+                               EMIT(PPC_RAW_BCTRL());
+                               EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+                               break;
+                       case STF_BARRIER_NONE:
+                               break;
+                       }
                        break;
 
                /*
@@ -993,7 +1055,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
index bc15200852b7c3059dd44060a7ebc87e860f4c4f..09fafcf2d3a060d9ba1e28658447d659bc993819 100644 (file)
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
        if (is_kdump_kernel() || reset_devices) {
                pr_info("Issue PHB reset ...\n");
                list_for_each_entry(phb, &hose_list, list_node) {
+                       // Skip if the slot is empty
+                       if (list_empty(&PCI_DN(phb->dn)->child_list))
+                               continue;
+
                        pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
                        config_addr = pseries_eeh_get_pe_config_addr(pdn);
 
index 1b305e4118622af70561daf8887b19ff2dcf101f..8627362f613ee51d931246731f7a85281ca2af3b 100644 (file)
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
        irq_chip_unmask_parent(d);
 }
 
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+       struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+       /*
+        * Do not update the MSIx vector table. It's not strictly necessary
+        * because the table is initialized by the underlying hypervisor, PowerVM
+        * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+        * activation will fail. This can happen in some drivers (eg. IPR) which
+        * deactivate an IRQ used for testing MSI support.
+        */
+       entry->msg = *msg;
+}
+
 static struct irq_chip pseries_pci_msi_irq_chip = {
        .name           = "pSeries-PCI-MSI",
        .irq_shutdown   = pseries_msi_shutdown,
        .irq_mask       = pseries_msi_mask,
        .irq_unmask     = pseries_msi_unmask,
        .irq_eoi        = irq_chip_eoi_parent,
+       .irq_write_msi_msg      = pseries_msi_write_msg,
 };
 
 static struct msi_domain_info pseries_msi_domain_info = {
index c732ce5a3e1a500746feb41649d78b8a164ae354..c5d75c02ad8b512d51b13eb0c4a7baf7bec3524e 100644 (file)
@@ -945,7 +945,8 @@ static int xive_get_irqchip_state(struct irq_data *data,
                 * interrupt to be inactive in that case.
                 */
                *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
-                       (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+                       (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
+                        !irqd_irq_disabled(data)));
                return 0;
        default:
                return -EINVAL;
index 301a54233c7e26a598eb63f4a75aad9301c12141..6a6fa9e976d598b04dcb95febbac95d7119349d5 100644 (file)
@@ -561,5 +561,3 @@ menu "Power management options"
 source "kernel/power/Kconfig"
 
 endmenu
-
-source "drivers/firmware/Kconfig"
index b933b1583c9fd24693cce86a3c7244b7b857c454..34fbb3ea21d5bb259b0c6c93aa2d36ebaabb67e4 100644 (file)
@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
 #endif
 }
 
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
 #endif /* _ASM_RISCV_SYSCALL_H */
index 893e47195e3055c341e3980ad023e83d6d28025e..208e31bc5d1c28e2b728400a035f92034013b6df 100644 (file)
 #ifdef CONFIG_MMU
 
 #include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefor don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES    1
 
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
 
 #define VDSO_SYMBOL(base, name)                                                        \
        (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
 
 #endif /* CONFIG_MMU */
 
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_VDSO_H */
index 4b989ae15d59f7b8e4765e3ad93b07a2923b2e6e..8062996c2dfd077eca892b3ce7bc0fa8a91f2281 100644 (file)
 #ifdef __LP64__
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
 #endif /* __LP64__ */
 
+#define __ARCH_WANT_SYS_CLONE3
+
 #include <asm-generic/unistd.h>
 
 /*
index a63c667c27b35cdcf028e44b3394ba14b6abb1e9..44b1420a22705a81eca174d748bfc2d2986302d7 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
 #include <asm/syscall.h>
 
 #undef __SYSCALL
index 25a3b8849599173846378f4aa50907a9b878beed..b70956d8040812423cf5c13ed03a6f3f629ae34b 100644 (file)
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
+#include <asm/vdso.h>
+
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
-#include <asm/vdso.h>
+struct vdso_data {
+};
 #endif
 
 extern char vdso_start[], vdso_end[];
 
+enum vvar_pages {
+       VVAR_DATA_PAGE_OFFSET,
+       VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
+
 static unsigned int vdso_pages __ro_after_init;
 static struct page **vdso_pagelist __ro_after_init;
 
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
 
        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
        vdso_pagelist =
-               kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+               kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
        if (unlikely(vdso_pagelist == NULL)) {
                pr_err("vdso: pagelist allocation failed\n");
                return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
        unsigned long vdso_base, vdso_len;
        int ret;
 
-       vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+       BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+       vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
 
-       mmap_write_lock(mm);
        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto end;
        }
 
-       /*
-        * Put vDSO base into mm struct. We need to do this before calling
-        * install_special_mapping or the perf counter mmap tracking code
-        * will fail to recognise it as a vDSO (since arch_vma_name fails).
-        */
-       mm->context.vdso = (void *)vdso_base;
+       mm->context.vdso = NULL;
+       ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       if (unlikely(ret))
+               goto end;
 
        ret =
-          install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+          install_special_mapping(mm, vdso_base + VVAR_SIZE,
+               vdso_pages << PAGE_SHIFT,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_pagelist);
 
-       if (unlikely(ret)) {
-               mm->context.vdso = NULL;
+       if (unlikely(ret))
                goto end;
-       }
 
-       vdso_base += (vdso_pages << PAGE_SHIFT);
-       ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       /*
+        * Put vDSO base into mm struct. We need to do this before calling
+        * install_special_mapping or the perf counter mmap tracking code
+        * will fail to recognise it as a vDSO (since arch_vma_name fails).
+        */
+       mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
 
-       if (unlikely(ret))
-               mm->context.vdso = NULL;
 end:
        mmap_write_unlock(mm);
        return ret;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
                return "[vdso]";
        if (vma->vm_mm && (vma->vm_start ==
-                          (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+                          (long)vma->vm_mm->context.vdso - VVAR_SIZE))
                return "[vdso_data]";
        return NULL;
 }
index e6f558bca71bb2128e6f1952b97e78a16cc09777..e9111f700af08062850cca8ba9e1684749aaa2fa 100644 (file)
@@ -3,12 +3,13 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 #include <asm/page.h>
+#include <asm/vdso.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
-       PROVIDE(_vdso_data = . + PAGE_SIZE);
+       PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
        . = SIZEOF_HEADERS;
 
        .hash           : { *(.hash) }                  :text
index 094118663285d545c101ac518a3b517bcec2ca8b..89f81067e09edba0c9cacdafd41cc97ea2b0e19e 100644 (file)
@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
 
 void flush_icache_all(void)
 {
+       local_flush_icache_all();
+
        if (IS_ENABLED(CONFIG_RISCV_SBI))
                sbi_remote_fence_i(NULL);
        else
index e4803ec51110c65252f14e82bcb6e089661eb5cc..6b3c366af78eb5e9608cdd732d25155c9e05fb9a 100644 (file)
@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
 int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
 
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
index b9f85b2dc053f55e2c98d76b8b767ec6851102ba..6af59c59cc1b8da930f068df7d87876e060cbc26 100644 (file)
@@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 
 /**
  * guest_translate_address - translate guest logical into guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @gpa: Guest physical address
+ * @mode: Translation access mode
  *
  * Parameter semantics are the same as the ones from guest_translate.
  * The memory contents at the guest address are not changed.
@@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @length: Length of test range
+ * @mode: Translation access mode
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
                    unsigned long length, enum gacc_mode mode)
@@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 
 /**
  * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @vcpu: virtual cpu
  * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set
@@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  * @pgt: pointer to the beginning of the page table for the given address if
  *      successful (return value 0), or to the first invalid DAT entry in
  *      case of exceptions (return value > 0)
+ * @dat_protection: referenced memory is write protected
  * @fake: pgt references contiguous guest memory block, not a pgtable
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
index 72b25b7cc6aecb6655b898ade7e53591f1df262a..2bd8f854f1b41b23906deaed053e82192834d570 100644 (file)
@@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 
 /**
  * handle_external_interrupt - used for external interruption interceptions
+ * @vcpu: virtual cpu
  *
  * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
  * the new PSW does not have external interrupts disabled. In the first case,
@@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 }
 
 /**
- * Handle MOVE PAGE partial execution interception.
+ * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
+ * @vcpu: virtual cpu
  *
  * This interception can only happen for guests with DAT disabled and
  * addresses that are currently not mapped in the host. Thus we try to
index 16256e17a544a84f4dee803048b063d23f4c8f8e..10722455fd02e9b2f81f7f53af238f13acd3a4d9 100644 (file)
@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-       set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
index 752a0ffab9bf10c5f0e4cd10d47bb4b5e38f7c09..6a6dd5e1daf63999e895767789cd4eb05173ac31 100644 (file)
@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
                kvm_s390_patch_guest_per_regs(vcpu);
        }
 
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 
        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
index ecd741ee3276e83a4f69f59d4a09e0ebe4072e58..52bc8fbaa60ac5bc12e8eec4f99f2bb6f2d40b82 100644 (file)
@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-       return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
index cfcdf76d6a9573be23b9482504c054d9f6816285..a95ca6df4e5e6902fef19f87da7e6a6754b80b62 100644 (file)
@@ -259,14 +259,13 @@ EXPORT_SYMBOL(strcmp);
 #ifdef __HAVE_ARCH_STRRCHR
 char *strrchr(const char *s, int c)
 {
-       size_t len = __strend(s) - s;
-
-       if (len)
-              do {
-                      if (s[len] == (char) c)
-                              return (char *) s + len;
-              } while (--len > 0);
-       return NULL;
+       ssize_t len = __strend(s) - s;
+
+       do {
+               if (s[len] == (char)c)
+                       return (char *)s + len;
+       } while (--len >= 0);
+       return NULL;
 }
 EXPORT_SYMBOL(strrchr);
 #endif
index 840d8594437d5865f8a9fcbd1de1a1773d7a200d..1a374d021e256d90ad06e77a1d945509ee902c2f 100644 (file)
@@ -1826,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
        if (jit.addrs == NULL) {
                fp = orig_fp;
-               goto out;
+               goto free_addrs;
        }
        /*
         * Three initial passes:
index e7e6788d75a864605a99ff34d56393d1a9b35a5b..b833155ce838111981b424418703c584c47c8771 100644 (file)
@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
        spin_unlock(&zpci_list_lock);
 
        list_for_each_entry_safe(zdev, tmp, &remove, entry)
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
 }
 
 int pci_domain_nr(struct pci_bus *bus)
@@ -751,6 +751,14 @@ error:
        return ERR_PTR(rc);
 }
 
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+       enum zpci_state state = zdev->state;
+
+       return state != ZPCI_FN_STATE_RESERVED &&
+               state != ZPCI_FN_STATE_STANDBY;
+}
+
 /**
  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
  * @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        return 0;
 }
 
+/**
+ * zpci_device_reserved() - Mark device as reserved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case that a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can not be found via
+ * get_zdev_by_fid() anymore but may still be accessible via existing
+ * references though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+       if (zdev->has_hp_slot)
+               zpci_exit_slot(zdev);
+       /*
+        * Remove device from zpci_list as it is going away. This also
+        * makes sure we ignore subsequent zPCI events for this device.
+        */
+       spin_lock(&zpci_list_lock);
+       list_del(&zdev->entry);
+       spin_unlock(&zpci_list_lock);
+       zdev->state = ZPCI_FN_STATE_RESERVED;
+       zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+       zpci_zdev_put(zdev);
+}
+
 void zpci_release_device(struct kref *kref)
 {
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
+               spin_lock(&zpci_list_lock);
+               list_del(&zdev->entry);
+               spin_unlock(&zpci_list_lock);
+               zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+               fallthrough;
+       case ZPCI_FN_STATE_RESERVED:
                if (zdev->has_resources)
                        zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
        default:
                break;
        }
-
-       spin_lock(&zpci_list_lock);
-       list_del(&zdev->entry);
-       spin_unlock(&zpci_list_lock);
        zpci_dbg(3, "rem fid:%x\n", zdev->fid);
        kfree(zdev);
 }
index c856f80cb21b880d233fe6c6ec3c93ab4fcb688c..5b8d647523f969abe8fc0aa792e9390f631fe828 100644 (file)
@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                        /* The 0x0304 event may immediately reserve the device */
                        if (!clp_get_state(zdev->fid, &state) &&
                            state == ZPCI_FN_STATE_RESERVED) {
-                               zpci_zdev_put(zdev);
+                               zpci_device_reserved(zdev);
                        }
                }
                break;
@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        case 0x0308: /* Standby -> Reserved */
                if (!zdev)
                        break;
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
                break;
        default:
                break;
index ab83c22d274e7161b82fdf6ff2a8c892e2eff3f9..d9830e7e1060f7c38697904d0a04cfd9a888ccd7 100644 (file)
@@ -1405,7 +1405,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
-       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
        select X86_PAE
        help
          Select this if you have a 32-bit processor and more than 4
@@ -1525,7 +1525,6 @@ config AMD_MEM_ENCRYPT
 
 config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
        bool "Activate AMD Secure Memory Encryption (SME) by default"
-       default y
        depends on AMD_MEM_ENCRYPT
        help
          Say yes to have system memory encrypted by default if running on
@@ -2832,8 +2831,6 @@ config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
 
-source "drivers/firmware/Kconfig"
-
 source "arch/x86/kvm/Kconfig"
 
 source "arch/x86/Kconfig.assembler"
index fa2c3f50aecbdfb118a8f8b154751047ca9f4d3a..18d2f51991944a3635d48327add9c06359e908ba 100644 (file)
@@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
         *      %rdx: src (1..8 blocks)
         *      %rcx: num blocks (1..8)
         */
-       FRAME_BEGIN
-
        cmpq $5, %rcx;
        jb sm4_aesni_avx_crypt4;
+
+       FRAME_BEGIN
+
        vmovdqu (0 * 16)(%rdx), RA0;
        vmovdqu (1 * 16)(%rdx), RA1;
        vmovdqu (2 * 16)(%rdx), RA2;
index 2a57dbed48945c0c4d0c437f34b4e2737114cac4..6dfa8ddaa60f7615beef94d8c4b1ce0fcf32d857 100644 (file)
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
        if (err) {
                if (event->destroy)
                        event->destroy(event);
+               event->destroy = NULL;
        }
 
        if (READ_ONCE(x86_pmu.attr_rdpmc) &&
index 7011e87be6d030fafedf364e1284d69eba45027a..9a044438072ba2190dfa1453d18483b0815b8b9a 100644 (file)
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+       INTEL_EVENT_CONSTRAINT(0xef, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
        EVENT_CONSTRAINT_END
 };
index c853b28efa334546a104c4dbb25c43a0665faa6f..96c775abe31ff7ebbf0fd39b1c6e65395b78af96 100644 (file)
@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_BROADWELL_D:
        case INTEL_FAM6_BROADWELL_G:
        case INTEL_FAM6_BROADWELL_X:
+       case INTEL_FAM6_SAPPHIRERAPIDS_X:
 
        case INTEL_FAM6_ATOM_SILVERMONT:
        case INTEL_FAM6_ATOM_SILVERMONT_D:
index 32a1ad356c183f35873cf37926edda5c79c63734..db2d92fb44daf820402f90bd4d320cb5c133477a 100644 (file)
@@ -122,17 +122,27 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
        ipi_arg->reserved = 0;
        ipi_arg->vp_set.valid_bank_mask = 0;
 
-       if (!cpumask_equal(mask, cpu_present_mask)) {
+       /*
+        * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
+        * when the IPI is sent to all currently present CPUs.
+        */
+       if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
                if (exclude_self)
                        nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
                else
                        nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
-       }
-       if (nr_bank < 0)
-               goto ipi_mask_ex_done;
-       if (!nr_bank)
+
+               /*
+                * 'nr_bank <= 0' means some CPUs in cpumask can't be
+                * represented in VP_SET. Return an error and fall back to
+                * native (architectural) method of sending IPIs.
+                */
+               if (nr_bank <= 0)
+                       goto ipi_mask_ex_done;
+       } else {
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
+       }
 
        status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
                              ipi_arg, NULL);
index 14ebd21965691d17ea763e5f62494cf780946cae..43184640b579a45565919801e8a23a2301e69017 100644 (file)
@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
                 * For !SMAP hardware we patch out CLAC on entry.
                 */
                if (boot_cpu_has(X86_FEATURE_SMAP) ||
-                   (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+                   (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
                        mask |= X86_EFLAGS_AC;
 
                WARN_ON_ONCE(flags & mask);
index f8f48a7ec577f64d2af855f58e0467768f091ccc..5a0298aa56ba8cf3c582bfdad40ed55a1b0a68d4 100644 (file)
@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {
 
        struct kvm_pio_request pio;
        void *pio_data;
-       void *guest_ins_data;
+       void *sev_pio_data;
+       unsigned sev_pio_count;
 
        u8 event_exit_inst_len;
 
index 87bd6025d91d4bb22df73bd821072904b337a5f3..6a5f3acf2b331291be6b5a2a1600361b9b2f2393 100644 (file)
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
                            struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
index eceea929909740613471b74567d8e24413b57339..6c576519210280483b93f67d2101aa4f1d26445d 100644 (file)
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+       return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+       return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */
index 3506d8c598c18b78596816ab99e98a5ab99d4ba6..4557f7cb0fa6ea175a99cb801886ecc273d9a16f 100644 (file)
@@ -14,16 +14,19 @@ static inline int pci_xen_hvm_init(void)
        return -1;
 }
 #endif
-#if defined(CONFIG_XEN_DOM0)
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void);
-int xen_find_device_domain_owner(struct pci_dev *dev);
-int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-int xen_unregister_device_domain_owner(struct pci_dev *dev);
 #else
 static inline int __init pci_xen_initial_domain(void)
 {
        return -1;
 }
+#endif
+#ifdef CONFIG_XEN_DOM0
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
 static inline int xen_find_device_domain_owner(struct pci_dev *dev)
 {
        return -1;
index 0f8885949e8c4b602fefc2eaf918d11a2957c051..b3410f1ac21754c9ff94338a71da12e812d65686 100644 (file)
@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_SMAP
                cr4_set_bits(X86_CR4_SMAP);
 #else
+               clear_cpu_cap(c, X86_FEATURE_SMAP);
                cr4_clear_bits(X86_CR4_SMAP);
 #endif
        }
index 4b8813bafffdcf87cde891df773b53ed2f702de7..bb1c3f5f60c81d3969e0c84e269f3f4883dd0467 100644 (file)
@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        rdt_domain_reconfigure_cdp(r);
 
        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
        if (r->mon_capable && domain_setup_mon_state(r, d)) {
-               kfree(d);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
+               kfree(hw_dom);
                return;
        }
 
index 38837dad46e629052957a0d837c928e62045a8b0..391a4e2b86049ecf012be5593d281ce28daa5147 100644 (file)
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3e20,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3ec4,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x8a12,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_BROADCOM, 0x4331,
          PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
index 445c57c9c5397f9b1af0f94efb4da614ba8f6080..831b25c5e70581aeb490e951a7bb8842a5dbc0be 100644 (file)
@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                                     sizeof(fpu->state.fxsave)))
                        return -EFAULT;
 
-               /* Reject invalid MXCSR values. */
-               if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-                       return -EINVAL;
+               if (IS_ENABLED(CONFIG_X86_64)) {
+                       /* Reject invalid MXCSR values. */
+                       if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+                               return -EINVAL;
+               } else {
+                       /* Mask invalid bits out for historical reasons (broken hardware). */
+                       fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+               }
 
                /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
                if (use_xsave())
index 42fc41dd0e1f17058724bbba75dfa1ede83994c8..882213df37130249b8712322810e5cbec3b92785 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
+#include <asm/mwait.h>
 
 #undef  pr_fmt
 #define pr_fmt(fmt) "hpet: " fmt
@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
        return false;
 }
 
+static bool __init mwait_pc10_supported(void)
+{
+       unsigned int eax, ebx, ecx, mwait_substates;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+               return false;
+
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return false;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+       return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+              (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+              (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so force disable HPET as that
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ *     - ACPI tables
+ *     - Enablement of intel_idle
+ *     - Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reasons why it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well which seems to be the last remaining timer on
+ * X86/INTEL platforms that have not been completely wrecked by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * foreseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+       unsigned long long pcfg;
+
+       /* Check whether PC10 substates are supported */
+       if (!mwait_pc10_supported())
+               return false;
+
+       /* Check whether PC10 is enabled in PKG C-state limit */
+       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+       if ((pcfg & 0xF) < 8)
+               return false;
+
+       if (hpet_force_user) {
+               pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+               return false;
+       }
+
+       pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+       boot_hpet_disable = true;
+       return true;
+}
+
 /**
  * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
        if (!is_hpet_capable())
                return 0;
 
+       if (hpet_is_pc10_damaged())
+               return 0;
+
        hpet_set_mapping();
        if (!hpet_virt_address)
                return 0;
index ad273e5861c1b267cb815fdd54969c2b26ffc90d..73c74b961d0fd9b69b6dd0f9fab50b312b5fccd6 100644 (file)
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
                        hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-       return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-       return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
index 9f90f460a28cc0f49b161188933dba804249acdf..bf1033a62e4806a64a430075fc5b06540f554010 100644 (file)
@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
                } else {
                        ret = ES_VMM_ERROR;
                }
+       } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+               ret = ES_VMM_ERROR;
        } else {
                ret = ES_OK;
        }
index fe03bd978761eb34a1b5230303236c5e746a807d..751aa85a300129e9489e8c6ed61a1e4ff35117e0 100644 (file)
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        for (i = 0; i < nent; i++) {
                e = &entries[i];
 
-               if (e->function == function && (e->index == index ||
-                   !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+               if (e->function == function &&
+                   (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
                        return e;
        }
 
index 2837110e66eda5952d8110f874e158c2a4bcf067..9a144ca8e146054209c446d9e66e426fca0334ee 100644 (file)
@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
        __FOP_RET(#op)
 
 asm(".pushsection .fixup, \"ax\"\n"
-    ".global kvm_fastop_exception \n"
     "kvm_fastop_exception: xor %esi, %esi; ret\n"
     ".popsection");
 
@@ -4206,7 +4205,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
        u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 
        if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
-               return emulate_ud(ctxt);
+               return emulate_gp(ctxt, 0);
 
        return X86EMUL_CONTINUE;
 }
index 232a86a6faaf921845262d14163c696cc046be78..d5124b520f761cae5c602e6e7a7c0ca484ce7927 100644 (file)
@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
 
-       hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+       hv_vcpu->vp_index = vcpu->vcpu_idx;
 
        return 0;
 }
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
-               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
                u32 new_vp_index = (u32)data;
 
                if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 * VP index is changing, adjust num_mismatched_vp_indexes if
                 * it now matches or no longer matches vcpu_idx.
                 */
-               if (hv_vcpu->vp_index == vcpu_idx)
+               if (hv_vcpu->vp_index == vcpu->vcpu_idx)
                        atomic_inc(&hv->num_mismatched_vp_indexes);
-               else if (new_vp_index == vcpu_idx)
+               else if (new_vp_index == vcpu->vcpu_idx)
                        atomic_dec(&hv->num_mismatched_vp_indexes);
 
                hv_vcpu->vp_index = new_vp_index;
index 730da8537d058ddc6e57071ca31da24ab6c0868b..ed1c4e546d0495eeaa0f7e4ef85b964fc591fb81 100644 (file)
@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-       return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+       return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
 }
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
index ff005fe738a4c1dae5e2b6b4648e1d5f34cfd24c..8c065da73f8e541430770fd860653e4dee4a470c 100644 (file)
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;
-       unsigned long vcpu_bitmap;
        int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+       DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 
        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        irq.shorthand = APIC_DEST_NOSHORT;
                        irq.dest_id = e->fields.dest_id;
                        irq.msi_redir_hint = false;
-                       bitmap_zero(&vcpu_bitmap, 16);
+                       bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                &vcpu_bitmap);
+                                                vcpu_bitmap);
                        if (old_dest_mode != e->fields.dest_mode ||
                            old_dest_id != e->fields.dest_id) {
                                /*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                                    kvm_lapic_irq_dest_mode(
                                        !!e->fields.dest_mode);
                                kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                        &vcpu_bitmap);
+                                                        vcpu_bitmap);
                        }
                        kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-                                                         &vcpu_bitmap);
+                                                         vcpu_bitmap);
                } else {
                        kvm_make_scan_ioapic_request(ioapic->kvm);
                }
index 76fb0092120377bec06e4f2e9aeb2d2bb7d3f099..d6ac32f3f650c066733ff9afe96ece36bca01523 100644 (file)
@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       u64 msr_val;
        int i;
 
        if (!init_event) {
-               vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-                                      MSR_IA32_APICBASE_ENABLE;
+               msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
                if (kvm_vcpu_is_reset_bsp(vcpu))
-                       vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+                       msr_val |= MSR_IA32_APICBASE_BSP;
+               kvm_lapic_set_base(vcpu, msr_val);
        }
 
        if (!apic)
@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
 
-       if (!init_event) {
-               apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+       /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+       if (!init_event)
                kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
-       }
        kvm_apic_set_version(apic->vcpu);
 
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
                lapic_timer_advance_dynamic = false;
        }
 
+       /*
+        * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+        * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+        */
+       vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 void kvm_lapic_exit(void)
 {
        static_key_deferred_flush(&apic_hw_disabled);
+       WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
        static_key_deferred_flush(&apic_sw_disabled);
+       WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }
index 2d7e61122af81566d857cde0e8540aa852fb5795..0cc58901bf7a79a55fd3b9edce3eb6ebafc67a8c 100644 (file)
@@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
        } while (!sp->unsync_children);
 }
 
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
-                             struct kvm_mmu_page *parent)
+static int mmu_sync_children(struct kvm_vcpu *vcpu,
+                            struct kvm_mmu_page *parent, bool can_yield)
 {
        int i;
        struct kvm_mmu_page *sp;
@@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
                }
                if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
                        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+                       if (!can_yield) {
+                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+                               return -EINTR;
+                       }
+
                        cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
                        flush = false;
                }
        }
 
        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+       return 0;
 }
 
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
 
-               if (sp->unsync_children)
-                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-
                __clear_sp_write_flooding_count(sp);
 
 trace_get_page:
@@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                write_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 
-               mmu_sync_children(vcpu, sp);
+               mmu_sync_children(vcpu, sp, true);
 
                kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                write_unlock(&vcpu->kvm->mmu_lock);
@@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                if (IS_VALID_PAE_ROOT(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = to_shadow_page(root);
-                       mmu_sync_children(vcpu, sp);
+                       mmu_sync_children(vcpu, sp, true);
                }
        }
 
@@ -4593,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
        unsigned bit;
        bool wp;
 
-       if (!is_cr4_pke(mmu)) {
-               mmu->pkru_mask = 0;
+       mmu->pkru_mask = 0;
+
+       if (!is_cr4_pke(mmu))
                return;
-       }
 
        wp = is_cr0_wp(mmu);
 
index 269f11f92fd052d5dcf2f6ed53d6f55bad21eecd..21427e84a82ef6dd2097e08e570b9cdd730a40a8 100644 (file)
@@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
        cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_head *head;
 
        head = &kvm->arch.track_notifier_head;
-       init_srcu_struct(&head->track_srcu);
        INIT_HLIST_HEAD(&head->track_notifier_list);
+       return init_srcu_struct(&head->track_srcu);
 }
 
 /*
index 7d03e9b7ccfa9796f91434498e7ebaacf16db4a4..913d52a7923e654576b1af8ad0400a55cd36e86a 100644 (file)
@@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        access = gw->pt_access[it.level - 2];
-                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
-                                             false, access);
+                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+                                             it.level-1, false, access);
+                       /*
+                        * We must synchronize the pagetable before linking it
+                        * because the guest doesn't need to flush tlb when
+                        * the gpte is changed from non-present to present.
+                        * Otherwise, the guest may use the wrong mapping.
+                        *
+                        * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+                        * synchronized it transiently via kvm_sync_page().
+                        *
+                        * For higher level pagetable, we synchronize it via
+                        * the slower mmu_sync_children().  If it needs to
+                        * break, some progress has been made; return
+                        * RET_PF_RETRY and retry on the next #PF.
+                        * KVM_REQ_MMU_SYNC is not necessary but it
+                        * expedites the process.
+                        */
+                       if (sp->unsync_children &&
+                           mmu_sync_children(vcpu, sp, false))
+                               return RET_PF_RETRY;
                }
 
                /*
@@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- *
- * Note:
- *   We should flush all tlbs if spte is dropped even though guest is
- *   responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
- *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
- *   used by guest then tlbs are not flushed, so guest is allowed to access the
- *   freed pages.
- *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return 0;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                       /*
-                        * Update spte before increasing tlbs_dirty to make
-                        * sure no tlb flush is lost after spte is zapped; see
-                        * the comments in kvm_flush_remote_tlbs().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
@@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
-                       /*
-                        * The same as above where we are doing
-                        * prefetch_invalid_gpte().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
index 2545d0c61985bd47b1d79384bfc4133cc3ce3649..510b833cbd39915c021db25d357cf0bedf777a43 100644 (file)
@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
-       svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
@@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-                        struct vmcb *vmcb12)
+                        struct vmcb *vmcb12, bool from_vmrun)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
@@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_save(svm, vmcb12);
 
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-                                 nested_npt_enabled(svm), true);
+                                 nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;
 
        if (!npt_enabled)
                vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+       if (!from_vmrun)
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        svm_set_gif(svm, true);
 
        return 0;
@@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
        svm->nested.nested_run_pending = 1;
 
-       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;
 
        if (nested_svm_vmrun_msrpm(svm))
index 75e0b21ad07c9e89a54131be45bdcca3fa88af4e..2e4916be290ee167eb8681ba3d7401112d5b86d7 100644 (file)
@@ -595,43 +595,55 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        return 0;
 }
 
-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                                   int *error)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_update_vmsa vmsa;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int ret;
+
+       /* Perform some pre-encryption checks against the VMSA */
+       ret = sev_es_sync_vmsa(svm);
+       if (ret)
+               return ret;
+
+       /*
+        * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+        * the VMSA memory content (i.e it will write the same memory region
+        * with the guest's key), so invalidate it first.
+        */
+       clflush_cache_range(svm->vmsa, PAGE_SIZE);
+
+       vmsa.reserved = 0;
+       vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+       vmsa.address = __sme_pa(svm->vmsa);
+       vmsa.len = PAGE_SIZE;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+       if (ret)
+               return ret;
+
+       vcpu->arch.guest_state_protected = true;
+       return 0;
+}
+
+static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
        struct kvm_vcpu *vcpu;
        int i, ret;
 
        if (!sev_es_guest(kvm))
                return -ENOTTY;
 
-       vmsa.reserved = 0;
-
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               struct vcpu_svm *svm = to_svm(vcpu);
-
-               /* Perform some pre-encryption checks against the VMSA */
-               ret = sev_es_sync_vmsa(svm);
+               ret = mutex_lock_killable(&vcpu->mutex);
                if (ret)
                        return ret;
 
-               /*
-                * The LAUNCH_UPDATE_VMSA command will perform in-place
-                * encryption of the VMSA memory content (i.e it will write
-                * the same memory region with the guest's key), so invalidate
-                * it first.
-                */
-               clflush_cache_range(svm->vmsa, PAGE_SIZE);
+               ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
 
-               vmsa.handle = sev->handle;
-               vmsa.address = __sme_pa(svm->vmsa);
-               vmsa.len = PAGE_SIZE;
-               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
-                                   &argp->error);
+               mutex_unlock(&vcpu->mutex);
                if (ret)
                        return ret;
-
-               svm->vcpu.arch.guest_state_protected = true;
        }
 
        return 0;
@@ -1397,8 +1409,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
-       if (ret)
+       if (ret) {
+               sev_decommission(start.handle);
                goto e_free_session;
+       }
 
        params.handle = start.handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1464,12 +1478,19 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-                                   PAGE_SIZE, &n, 0);
+                                   PAGE_SIZE, &n, 1);
        if (IS_ERR(guest_page)) {
                ret = PTR_ERR(guest_page);
                goto e_free_trans;
        }
 
+       /*
+        * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+        * encrypts the written data with the guest's key, and the cache may
+        * contain dirty, unencrypted data.
+        */
+       sev_clflush_pages(guest_page, n);
+
        /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
        data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
        data.guest_address |= sev_me_mask;
@@ -1501,6 +1522,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
+static bool cmd_allowed_from_mirror(u32 cmd_id)
+{
+       /*
+        * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+        * on active mirror VMs. Also allow the debugging and status commands.
+        */
+       if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+           cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+           cmd_id == KVM_SEV_DBG_ENCRYPT)
+               return true;
+
+       return false;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
        struct kvm_sev_cmd sev_cmd;
@@ -1517,8 +1552,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
        mutex_lock(&kvm->lock);
 
-       /* enc_context_owner handles all memory enc operations */
-       if (is_mirroring_enc_context(kvm)) {
+       /* Only the enc_context_owner handles some memory enc operations. */
+       if (is_mirroring_enc_context(kvm) &&
+           !cmd_allowed_from_mirror(sev_cmd.id)) {
                r = -EINVAL;
                goto out;
        }
@@ -1715,8 +1751,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info *mirror_sev;
-       unsigned int asid;
+       struct kvm_sev_info source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
@@ -1739,7 +1774,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
                goto e_source_unlock;
        }
 
-       asid = to_kvm_svm(source_kvm)->sev_info.asid;
+       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+              sizeof(source_sev));
 
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1759,8 +1795,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
-       mirror_sev->asid = asid;
        mirror_sev->active = true;
+       mirror_sev->asid = source_sev.asid;
+       mirror_sev->fd = source_sev.fd;
+       mirror_sev->es_active = source_sev.es_active;
+       mirror_sev->handle = source_sev.handle;
+       /*
+        * Do not copy ap_jump_table: the mirror does not share the same
+        * KVM contexts as the original, and they may have different
+        * memory views.
+        */
 
        mutex_unlock(&kvm->lock);
        return 0;
@@ -2551,7 +2595,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                return -EINVAL;
 
        return kvm_sev_es_string_io(&svm->vcpu, size, port,
-                                   svm->ghcb_sa, svm->ghcb_sa_len, in);
+                                   svm->ghcb_sa, svm->ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
index 05e8d4d279699ee003a028eaee2a5047a38f7587..989685098b3ea7d62f251bd3b1ac39de7e2391c6 100644 (file)
@@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
 
                svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
                        V_IRQ_INJECTION_BITS_MASK;
+
+               svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
        if (error_code)
                goto reinject;
 
+       /* All SVM instructions expect page aligned RAX */
+       if (svm->vmcb->save.rax & ~PAGE_MASK)
+               goto reinject;
+
        /* Decode the instruction for usage later */
        if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
                goto reinject;
@@ -4285,43 +4291,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        struct kvm_host_map map_save;
        int ret;
 
-       if (is_guest_mode(vcpu)) {
-               /* FED8h - SVM Guest */
-               put_smstate(u64, smstate, 0x7ed8, 1);
-               /* FEE0h - SVM Guest VMCB Physical Address */
-               put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+       if (!is_guest_mode(vcpu))
+               return 0;
 
-               svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-               svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-               svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+       /* FED8h - SVM Guest */
+       put_smstate(u64, smstate, 0x7ed8, 1);
+       /* FEE0h - SVM Guest VMCB Physical Address */
+       put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-               ret = nested_svm_vmexit(svm);
-               if (ret)
-                       return ret;
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-               /*
-                * KVM uses VMCB01 to store L1 host state while L2 runs but
-                * VMCB01 is going to be used during SMM and thus the state will
-                * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
-                * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-                * format of the area is identical to guest save area offsetted
-                * by 0x400 (matches the offset of 'struct vmcb_save_area'
-                * within 'struct vmcb'). Note: HSAVE area may also be used by
-                * L1 hypervisor to save additional host context (e.g. KVM does
-                * that, see svm_prepare_guest_switch()) which must be
-                * preserved.
-                */
-               if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                &map_save) == -EINVAL)
-                       return 1;
+       ret = nested_svm_vmexit(svm);
+       if (ret)
+               return ret;
 
-               BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+       /*
+        * KVM uses VMCB01 to store L1 host state while L2 runs but
+        * VMCB01 is going to be used during SMM and thus the state will
+        * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
+        * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+        * format of the area is identical to guest save area offsetted
+        * by 0x400 (matches the offset of 'struct vmcb_save_area'
+        * within 'struct vmcb'). Note: HSAVE area may also be used by
+        * L1 hypervisor to save additional host context (e.g. KVM does
+        * that, see svm_prepare_guest_switch()) which must be
+        * preserved.
+        */
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                        &map_save) == -EINVAL)
+               return 1;
 
-               svm_copy_vmrun_state(map_save.hva + 0x400,
-                                    &svm->vmcb01.ptr->save);
+       BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               kvm_vcpu_unmap(vcpu, &map_save, true);
-       }
+       svm_copy_vmrun_state(map_save.hva + 0x400,
+                            &svm->vmcb01.ptr->save);
+
+       kvm_vcpu_unmap(vcpu, &map_save, true);
        return 0;
 }
 
@@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
-       int ret = 0;
+       u64 saved_efer, vmcb12_gpa;
+       struct vmcb *vmcb12;
+       int ret;
 
-       if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-               u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-               u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-               u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-               struct vmcb *vmcb12;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return 0;
 
-               if (guest) {
-                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                               return 1;
+       /* Non-zero if SMI arrived while vCPU was in guest mode. */
+       if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+               return 0;
 
-                       if (!(saved_efer & EFER_SVME))
-                               return 1;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+               return 1;
 
-                       if (kvm_vcpu_map(vcpu,
-                                        gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-                               return 1;
+       saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+       if (!(saved_efer & EFER_SVME))
+               return 1;
 
-                       if (svm_allocate_nested(svm))
-                               return 1;
+       vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+               return 1;
 
-                       vmcb12 = map.hva;
+       ret = 1;
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+               goto unmap_map;
 
-                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       if (svm_allocate_nested(svm))
+               goto unmap_save;
 
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-                       kvm_vcpu_unmap(vcpu, &map, true);
+       /*
+        * Restore L1 host state from L1 HSAVE area as VMCB01 was
+        * used during SMM (see svm_enter_smm())
+        */
 
-                       /*
-                        * Restore L1 host state from L1 HSAVE area as VMCB01 was
-                        * used during SMM (see svm_enter_smm())
-                        */
-                       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                        &map_save) == -EINVAL)
-                               return 1;
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                            map_save.hva + 0x400);
+       /*
+        * Enter the nested guest now
+        */
 
-                       kvm_vcpu_unmap(vcpu, &map_save, true);
-               }
-       }
+       vmcb12 = map.hva;
+       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+unmap_save:
+       kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+       kvm_vcpu_unmap(vcpu, &map, true);
        return ret;
 }
 
index 524d943f3efc6a60b6b97a24d243094b694dc397..5d30db599e10ddfcc76abe10f78ebf781e5b692e 100644 (file)
@@ -191,7 +191,7 @@ struct vcpu_svm {
 
        /* SEV-ES scratch area support */
        void *ghcb_sa;
-       u64 ghcb_sa_len;
+       u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
 
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+                        u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
index 0dab1b7b529f409b352891ae7addc21e5e3cd1f2..ba6f99f584ac33e1f0053bf0200cb4f6668aba98 100644 (file)
@@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
        switch (msr_index) {
        case MSR_IA32_VMX_EXIT_CTLS:
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-               ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
                break;
        case MSR_IA32_VMX_ENTRY_CTLS:
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-               ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
-               ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+               break;
+       case MSR_IA32_VMX_PINBASED_CTLS:
+               ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+               break;
+       case MSR_IA32_VMX_VMFUNC:
+               ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
                break;
        }
 
index ccb03d69546ce716b8ed5026fc89ce35beaa20e4..eedcebf5800412433c44a8b63698ad165f4d4097 100644 (file)
@@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         * Guest state is invalid and unrestricted guest is disabled,
         * which means L1 attempted VMEntry to L2 with invalid state.
         * Fail the VMEntry.
+        *
+        * However when force loading the guest state (SMM exit or
+        * loading nested state after migration), it is possible to
+        * have invalid guest state now, which will be later fixed by
+        * restoring L2 register state.
         */
-       if (CC(!vmx_guest_state_valid(vcpu))) {
+       if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }
@@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
                                vmcs12->vm_exit_msr_load_count))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -4899,14 +4906,7 @@ out_vmcs02:
        return -ENOMEM;
 }
 
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not currently need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
+/* Emulate the VMXON instruction. */
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
@@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
        case EXIT_REASON_VMFUNC:
                /* VM functions are emulated through L2->L0 vmexits. */
                return true;
+       case EXIT_REASON_BUS_LOCK:
+               /*
+                * At present, bus lock VM exit is never exposed to L1.
+                * Handle L2's bus locks in L0 directly.
+                */
+               return true;
        default:
                break;
        }
index 0c2c0d5ae8734b0da27563f5e9f97077b6d6d041..7d595effb66f0819d97e132bb18948231e218e4e 100644 (file)
@@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
        vmx_prepare_switch_to_host(to_vmx(vcpu));
 }
 
-static bool emulation_required(struct kvm_vcpu *vcpu)
+bool vmx_emulation_required(struct kvm_vcpu *vcpu)
 {
        return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
 }
@@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        vmcs_writel(GUEST_RFLAGS, rflags);
 
        if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
-               vmx->emulation_required = emulation_required(vcpu);
+               vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    &msr_info->data))
                        return 1;
                /*
-                * Enlightened VMCS v1 doesn't have certain fields, but buggy
-                * Hyper-V versions are still trying to use corresponding
-                * features when they are exposed. Filter out the essential
-                * minimum.
+                * Enlightened VMCS v1 doesn't have certain VMCS fields but
+                * instead of just ignoring the features, different Hyper-V
+                * versions are either trying to use them and fail or do some
+                * sanity checking and refuse to boot. Filter all unsupported
+                * features out.
                 */
                if (!msr_info->host_initiated &&
                    vmx->nested.enlightened_vmcs_enabled)
@@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        /* depends on vcpu->arch.cr0 to be set to a new value */
-       vmx->emulation_required = emulation_required(vcpu);
+       vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static int vmx_get_max_tdp_level(void)
@@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
 {
        __vmx_set_segment(vcpu, var, seg);
 
-       to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -5561,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)
 
 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
 {
-       vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
-       vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
-       return 0;
+       /*
+        * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+        * VM-Exits. Unconditionally set the flag here and leave the handling to
+        * vmx_handle_exit().
+        */
+       to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+       return 1;
 }
 
 /*
@@ -6050,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
        int ret = __vmx_handle_exit(vcpu, exit_fastpath);
 
        /*
-        * Even when current exit reason is handled by KVM internally, we
-        * still need to exit to user space when bus lock detected to inform
-        * that there is a bus lock in guest.
+        * Exit to user space when bus lock detected to inform that there is
+        * a bus lock in guest.
         */
        if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
                if (ret > 0)
@@ -6301,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 
                /*
                 * If we are running L2 and L1 has a new pending interrupt
-                * which can be injected, we should re-evaluate
-                * what should be done with this new L1 interrupt.
-                * If L1 intercepts external-interrupts, we should
-                * exit from L2 to L1. Otherwise, interrupt should be
-                * delivered directly to L2.
+                * which can be injected, this may cause a vmexit or it may
+                * be injected into L2.  Either way, this interrupt will be
+                * processed via KVM_REQ_EVENT, not RVI, because we do not use
+                * virtual interrupt delivery to inject L1 interrupts into L2.
                 */
-               if (is_guest_mode(vcpu) && max_irr_updated) {
-                       if (nested_exit_on_intr(vcpu))
-                               kvm_vcpu_exiting_guest_mode(vcpu);
-                       else
-                               kvm_make_request(KVM_REQ_EVENT, vcpu);
-               }
+               if (is_guest_mode(vcpu) && max_irr_updated)
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else {
                max_irr = kvm_lapic_find_highest_irr(vcpu);
        }
@@ -6621,10 +6620,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                     vmx->loaded_vmcs->soft_vnmi_blocked))
                vmx->loaded_vmcs->entry_time = ktime_get();
 
-       /* Don't enter VMX if guest state is invalid, let the exit handler
-          start emulation until we arrive back to a valid state */
-       if (vmx->emulation_required)
+       /*
+        * Don't enter VMX if guest state is invalid, let the exit handler
+        * start emulation until we arrive back to a valid state.  Synthesize a
+        * consistency check VM-Exit due to invalid guest state and bail.
+        */
+       if (unlikely(vmx->emulation_required)) {
+
+               /* We don't emulate invalid state of a nested guest */
+               vmx->fail = is_guest_mode(vcpu);
+
+               vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+               vmx->exit_reason.failed_vmentry = 1;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+               vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+               vmx->exit_intr_info = 0;
                return EXIT_FASTPATH_NONE;
+       }
 
        trace_kvm_entry(vcpu);
 
@@ -6833,7 +6846,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
                 */
                tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
                if (tsx_ctrl)
-                       vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+                       tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
        }
 
        err = alloc_loaded_vmcs(&vmx->vmcs01);
index 4858c5fd95f27dd7b393c6c04c013bacae15a878..592217fd7d920229ff7c1bbc185a68d867254211 100644 (file)
@@ -248,12 +248,8 @@ struct vcpu_vmx {
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
-        * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
-        * into hardware when running the guest.  guest_uret_msrs[] is resorted
-        * whenever the number of "active" uret MSRs is modified.
         */
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
-       int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
 #ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
@@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
 int vmx_get_cpl(struct kvm_vcpu *vcpu);
+bool vmx_emulation_required(struct kvm_vcpu *vcpu);
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
index 28ef14155726461ec935c711ce8cc0ebe22c0bdf..b26647a5ea2297e9730e2bfbbcd92d184efb0e71 100644 (file)
@@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+       MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+       MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+       MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+       MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+       MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+       MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (v == kvm_get_vcpu(v->kvm, 0))
+       if (!v->vcpu_idx)
                kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
@@ -6899,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }
 
 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-                              unsigned short port, void *val,
+                              unsigned short port,
                               unsigned int count, bool in)
 {
        vcpu->arch.pio.port = port;
@@ -6907,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
        vcpu->arch.pio.count  = count;
        vcpu->arch.pio.size = size;
 
-       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-               vcpu->arch.pio.count = 0;
+       if (!kernel_pio(vcpu, vcpu->arch.pio_data))
                return 1;
-       }
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -6922,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
        return 0;
 }
 
-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
-                          unsigned short port, void *val, unsigned int count)
+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                            unsigned short port, unsigned int count)
 {
-       int ret;
+       WARN_ON(vcpu->arch.pio.count);
+       memset(vcpu->arch.pio_data, 0, size * count);
+       return emulator_pio_in_out(vcpu, size, port, count, true);
+}
 
-       if (vcpu->arch.pio.count)
-               goto data_avail;
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+{
+       int size = vcpu->arch.pio.size;
+       unsigned count = vcpu->arch.pio.count;
+       memcpy(val, vcpu->arch.pio_data, size * count);
+       trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+       vcpu->arch.pio.count = 0;
+}
 
-       memset(vcpu->arch.pio_data, 0, size * count);
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                          unsigned short port, void *val, unsigned int count)
+{
+       if (vcpu->arch.pio.count) {
+               /* Complete previous iteration.  */
+       } else {
+               int r = __emulator_pio_in(vcpu, size, port, count);
+               if (!r)
+                       return r;
 
-       ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-       if (ret) {
-data_avail:
-               memcpy(val, vcpu->arch.pio_data, size * count);
-               trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-               vcpu->arch.pio.count = 0;
-               return 1;
+               /* Results already available, fall through.  */
        }
 
-       return 0;
+       WARN_ON(count != vcpu->arch.pio.count);
+       complete_emulator_pio_in(vcpu, val);
+       return 1;
 }
 
 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -6956,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port, const void *val,
                            unsigned int count)
 {
+       int ret;
+
        memcpy(vcpu->arch.pio_data, val, size * count);
        trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-       return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+       ret = emulator_pio_in_out(vcpu, size, port, count, false);
+       if (ret)
+                vcpu->arch.pio.count = 0;
+
+        return ret;
 }
 
 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
@@ -7658,6 +7682,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 
                /* Process a latched INIT or SMI, if any.  */
                kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+               /*
+                * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+                * on SMM exit we still need to reload them from
+                * guest memory.
+                */
+               vcpu->arch.pdptrs_from_userspace = false;
        }
 
        kvm_mmu_reset_context(vcpu);
@@ -9629,14 +9660,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
-                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+               if (vcpu->arch.apicv_active)
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
+               if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
                        break;
                }
-
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
-        }
+       }
 
        /*
         * Do this here before restoring debug registers on the host.  And
@@ -10652,6 +10683,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        int r;
 
        vcpu->arch.last_vmentry_cpu = -1;
+       vcpu->arch.regs_avail = ~0;
+       vcpu->arch.regs_dirty = ~0;
 
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -10893,6 +10926,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
+       vcpu->arch.cr3 = 0;
+       kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+
        /*
         * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
         * of Intel's SDM list CD/NW as being set on INIT, but they contradict
@@ -11139,9 +11175,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+       int ret;
+
        if (type)
                return -EINVAL;
 
+       ret = kvm_page_track_init(kvm);
+       if (ret)
+               return ret;
+
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11174,7 +11216,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
-       kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
        kvm_xen_init_vm(kvm);
 
@@ -11368,7 +11409,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
                int level = i + 1;
                int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
 
-               WARN_ON(slot->arch.rmap[i]);
+               if (slot->arch.rmap[i])
+                       continue;
 
                slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
                if (!slot->arch.rmap[i]) {
@@ -12343,44 +12385,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
 
-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+                          unsigned int port);
+
+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
 {
-       memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-              vcpu->arch.pio.count * vcpu->arch.pio.size);
-       vcpu->arch.pio.count = 0;
+       int size = vcpu->arch.pio.size;
+       int port = vcpu->arch.pio.port;
 
+       vcpu->arch.pio.count = 0;
+       if (vcpu->arch.sev_pio_count)
+               return kvm_sev_es_outs(vcpu, size, port);
        return 1;
 }
 
 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-                          unsigned int port, void *data,  unsigned int count)
+                          unsigned int port)
 {
-       int ret;
-
-       ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                       data, count);
-       if (ret)
-               return ret;
+       for (;;) {
+               unsigned int count =
+                       min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+               int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
+
+               /* memcpy done already by emulator_pio_out.  */
+               vcpu->arch.sev_pio_count -= count;
+               vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+               if (!ret)
+                       break;
 
-       vcpu->arch.pio.count = 0;
+               /* Emulation done by the kernel.  */
+               if (!vcpu->arch.sev_pio_count)
+                       return 1;
+       }
 
+       vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
        return 0;
 }
 
 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-                         unsigned int port, void *data, unsigned int count)
+                         unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
 {
-       int ret;
+       unsigned count = vcpu->arch.pio.count;
+       complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+       vcpu->arch.sev_pio_count -= count;
+       vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}
 
-       ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                      data, count);
-       if (ret) {
-               vcpu->arch.pio.count = 0;
-       } else {
-               vcpu->arch.guest_ins_data = data;
-               vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+       int size = vcpu->arch.pio.size;
+       int port = vcpu->arch.pio.port;
+
+       advance_sev_es_emulated_ins(vcpu);
+       if (vcpu->arch.sev_pio_count)
+               return kvm_sev_es_ins(vcpu, size, port);
+       return 1;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+                         unsigned int port)
+{
+       for (;;) {
+               unsigned int count =
+                       min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+               if (!__emulator_pio_in(vcpu, size, port, count))
+                       break;
+
+               /* Emulation done by the kernel.  */
+               advance_sev_es_emulated_ins(vcpu);
+               if (!vcpu->arch.sev_pio_count)
+                       return 1;
        }
 
+       vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
        return 0;
 }
 
@@ -12388,8 +12467,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data,  unsigned int count,
                         int in)
 {
-       return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-                 : kvm_sev_es_outs(vcpu, size, port, data, count);
+       vcpu->arch.sev_pio_data = data;
+       vcpu->arch.sev_pio_count = count;
+       return in ? kvm_sev_es_ins(vcpu, size, port)
+                 : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
index 0fe6aacef3db2bd2203d687e91c88ef09e173385..9ea57389c554b7313d726e780fc51fea6977a761 100644 (file)
@@ -1341,9 +1341,10 @@ st:                      if (is_imm8(insn->off))
                        if (insn->imm == (BPF_AND | BPF_FETCH) ||
                            insn->imm == (BPF_OR | BPF_FETCH) ||
                            insn->imm == (BPF_XOR | BPF_FETCH)) {
-                               u8 *branch_target;
                                bool is64 = BPF_SIZE(insn->code) == BPF_DW;
                                u32 real_src_reg = src_reg;
+                               u32 real_dst_reg = dst_reg;
+                               u8 *branch_target;
 
                                /*
                                 * Can't be implemented with a single x86 insn.
@@ -1354,11 +1355,13 @@ st:                     if (is_imm8(insn->off))
                                emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
                                if (src_reg == BPF_REG_0)
                                        real_src_reg = BPF_REG_AX;
+                               if (dst_reg == BPF_REG_0)
+                                       real_dst_reg = BPF_REG_AX;
 
                                branch_target = prog;
                                /* Load old value */
                                emit_ldx(&prog, BPF_SIZE(insn->code),
-                                        BPF_REG_0, dst_reg, insn->off);
+                                        BPF_REG_0, real_dst_reg, insn->off);
                                /*
                                 * Perform the (commutative) operation locally,
                                 * put the result in the AUX_REG.
@@ -1369,7 +1372,8 @@ st:                       if (is_imm8(insn->off))
                                      add_2reg(0xC0, AUX_REG, real_src_reg));
                                /* Attempt to swap in new value */
                                err = emit_atomic(&prog, BPF_CMPXCHG,
-                                                 dst_reg, AUX_REG, insn->off,
+                                                 real_dst_reg, AUX_REG,
+                                                 insn->off,
                                                  BPF_SIZE(insn->code));
                                if (WARN_ON(err))
                                        return err;
@@ -1383,11 +1387,10 @@ st:                     if (is_imm8(insn->off))
                                /* Restore R0 after clobbering RAX */
                                emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
                                break;
-
                        }
 
                        err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
-                                                 insn->off, BPF_SIZE(insn->code));
+                                         insn->off, BPF_SIZE(insn->code));
                        if (err)
                                return err;
                        break;
@@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-                          struct bpf_prog *p, int stack_size, bool mod_ret)
+                          struct bpf_prog *p, int stack_size, bool save_ret)
 {
        u8 *prog = *pprog;
        u8 *jmp_insn;
@@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
        if (emit_call(&prog, p->bpf_func, prog))
                return -EINVAL;
 
-       /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+       /*
+        * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
         * of the previous call which is then passed on the stack to
         * the next BPF program.
+        *
+        * BPF_TRAMP_FENTRY trampoline may need to return the return
+        * value of BPF_PROG_TYPE_STRUCT_OPS prog.
         */
-       if (mod_ret)
+       if (save_ret)
                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
        /* replace 2 nops with JE insn, since jmp target is known */
@@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-                     struct bpf_tramp_progs *tp, int stack_size)
+                     struct bpf_tramp_progs *tp, int stack_size,
+                     bool save_ret)
 {
        int i;
        u8 *prog = *pprog;
 
        for (i = 0; i < tp->nr_progs; i++) {
-               if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+               if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+                                   save_ret))
                        return -EINVAL;
        }
        *pprog = prog;
@@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
        return 0;
 }
 
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+       if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+           (flags & BPF_TRAMP_F_SKIP_FRAME))
+               return false;
+
+       /*
+        * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+        * and it must be used alone.
+        */
+       if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+           (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+               return false;
+
+       return true;
+}
+
 /* Example:
  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
  * its 'struct btf_func_model' will be nr_args=2
@@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
        u8 **branches = NULL;
        u8 *prog;
+       bool save_ret;
 
        /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
        if (nr_args > 6)
                return -ENOTSUPP;
 
-       if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-           (flags & BPF_TRAMP_F_SKIP_FRAME))
+       if (!is_valid_bpf_tramp_flags(flags))
                return -EINVAL;
 
-       if (flags & BPF_TRAMP_F_CALL_ORIG)
-               stack_size += 8; /* room for return value of orig_call */
+       /* room for return value of orig_call or fentry prog */
+       save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+       if (save_ret)
+               stack_size += 8;
 
        if (flags & BPF_TRAMP_F_IP_ARG)
                stack_size += 8; /* room for IP address argument */
@@ -2005,7 +2033,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        }
 
        if (fentry->nr_progs)
-               if (invoke_bpf(m, &prog, fentry, stack_size))
+               if (invoke_bpf(m, &prog, fentry, stack_size,
+                              flags & BPF_TRAMP_F_RET_FENTRY_RET))
                        return -EINVAL;
 
        if (fmod_ret->nr_progs) {
@@ -2052,7 +2081,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        }
 
        if (fexit->nr_progs)
-               if (invoke_bpf(m, &prog, fexit, stack_size)) {
+               if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
                        ret = -EINVAL;
                        goto cleanup;
                }
@@ -2072,9 +2101,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
                        ret = -EINVAL;
                        goto cleanup;
                }
-               /* restore original return value back into RAX */
-               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
        }
+       /* restore return value of orig_call or fentry prog back into RAX */
+       if (save_ret)
+               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 
        EMIT1(0x5B); /* pop rbx */
        EMIT1(0xC9); /* leave */
index 3d41a09c2c14ca58f9303166103af28b2a0934db..5debe4ac6f8192b4c3ed89e10e8babd9919616f6 100644 (file)
@@ -113,7 +113,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                 false /* no mapping of GSI to PIRQ */);
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static int xen_register_gsi(u32 gsi, int triggering, int polarity)
 {
        int rc, irq;
@@ -261,7 +261,7 @@ error:
        return irq;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static bool __read_mostly pci_seg_supported = true;
 
 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -375,10 +375,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
                WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
        }
 }
-#else /* CONFIG_XEN_DOM0 */
+#else /* CONFIG_XEN_PV_DOM0 */
 #define xen_initdom_setup_msi_irqs     NULL
 #define xen_initdom_restore_msi_irqs   NULL
-#endif /* !CONFIG_XEN_DOM0 */
+#endif /* !CONFIG_XEN_PV_DOM0 */
 
 static void xen_teardown_msi_irqs(struct pci_dev *dev)
 {
@@ -555,7 +555,7 @@ int __init pci_xen_hvm_init(void)
        return 0;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void)
 {
        int irq;
@@ -583,6 +583,9 @@ int __init pci_xen_initial_domain(void)
        }
        return 0;
 }
+#endif
+
+#ifdef CONFIG_XEN_DOM0
 
 struct xen_device_domain_owner {
        domid_t domain;
@@ -656,4 +659,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
-#endif
+#endif /* CONFIG_XEN_DOM0 */
index ee2beda590d0d0156e2dd0f0febf45b71452cdd3..1d4a00e767ece00edc2a0ed8b3fcdfe160ee7b54 100644 (file)
@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
 
 static struct olpc_ec_driver ec_xo1_5_driver = {
        .ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
        /*
         * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
         * compiled in
index 9ac7457f52a3cd1d4c8e32585b3a18c44a415b5b..ed0442e354344fad84fce7fcc29c67934828a9f9 100644 (file)
 /*
  * PVH variables.
  *
- * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * pvh_bootparams and pvh_start_info need to live in a data segment since
  * they are used after startup_{32|64}, which clear .bss, are invoked.
  */
-struct boot_params pvh_bootparams __section(".data");
-struct hvm_start_info pvh_start_info __section(".data");
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
 
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
 
-static u64 pvh_get_root_pointer(void)
+static u64 __init pvh_get_root_pointer(void)
 {
        return pvh_start_info.rsdp_paddr;
 }
@@ -107,7 +107,7 @@ void __init __weak xen_pvh_init(struct boot_params *boot_params)
        BUG();
 }
 
-static void hypervisor_specific_init(bool xen_guest)
+static void __init hypervisor_specific_init(bool xen_guest)
 {
        if (xen_guest)
                xen_pvh_init(&pvh_bootparams);
index afc1da68b06d80bcd8afda6989689f400f90af56..6bcd3d8ca6ac5fa46290b369e0119d9ea652c7d8 100644 (file)
@@ -43,13 +43,9 @@ config XEN_PV_SMP
        def_bool y
        depends on XEN_PV && SMP
 
-config XEN_DOM0
-       bool "Xen PV Dom0 support"
-       default y
-       depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
-       depends on X86_IO_APIC && ACPI && PCI
-       help
-         Support running as a Xen PV Dom0 guest.
+config XEN_PV_DOM0
+       def_bool y
+       depends on XEN_PV && XEN_DOM0
 
 config XEN_PVHVM
        def_bool y
@@ -86,3 +82,12 @@ config XEN_PVH
        def_bool n
        help
          Support for running as a Xen PVH guest.
+
+config XEN_DOM0
+       bool "Xen Dom0 support"
+       default XEN_PV
+       depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64)
+       depends on X86_IO_APIC && ACPI && PCI
+       select X86_X2APIC if XEN_PVH && X86_64
+       help
+         Support running as a Xen Dom0 guest.
index 40b5779fce21cf85be58b626ecb4e7245ff75a4f..4953260e281c38d651b65cc0affaba99d7ecf645 100644 (file)
@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
 
-obj-$(CONFIG_XEN_DOM0)         += vga.o
+obj-$(CONFIG_XEN_PV_DOM0)      += vga.o
 
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 
index c79bd0af2e8c26d819fdda1b07072c52b256effa..95d970359e1746e230eadd5c8e0ae776ad3720ba 100644 (file)
@@ -3,6 +3,7 @@
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 #include <linux/memblock.h>
 #endif
+#include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 #include <linux/slab.h>
 
 #include <xen/xen.h>
 #include <xen/features.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/version.h>
 #include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/cpu.h>
 #include <asm/e820/api.h> 
+#include <asm/setup.h>
 
 #include "xen-ops.h"
 #include "smp.h"
@@ -52,9 +56,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
-
 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
 unsigned long  machine_to_phys_nr;
@@ -69,10 +70,12 @@ __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 /*
- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
- * before clearing the bss.
+ * NB: These need to live in .data or alike because they're used by
+ * xen_prepare_pvh() which runs before clearing the bss.
  */
-uint32_t xen_start_flags __section(".data") = 0;
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+uint32_t __ro_after_init xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
 
 /*
@@ -258,6 +261,45 @@ int xen_vcpu_setup(int cpu)
        return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
 }
 
+void __init xen_banner(void)
+{
+       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       struct xen_extraversion extra;
+
+       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+       pr_info("Booting kernel on %s\n", pv_info.name);
+       pr_info("Xen version: %u.%u%s%s\n",
+               version >> 16, version & 0xffff, extra.extraversion,
+               xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
+               ? " (preserve-AD)" : "");
+}
+
+/* Check if running on Xen version (major, minor) or later */
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+       unsigned int version;
+
+       if (!xen_domain())
+               return false;
+
+       version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+               ((version >> 16) > major))
+               return true;
+       return false;
+}
+
+void __init xen_add_preferred_consoles(void)
+{
+       add_preferred_console("xenboot", 0, NULL);
+       if (!boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+       add_preferred_console("hvc", 0, NULL);
+       if (boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+}
+
 void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
index 6e0d0754f94f5b98f3fd4757547e514d9be43fe5..a7b7d674f50058c22a9154b02f79f26aac6b616f 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
-#include <linux/console.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/edd.h>
@@ -109,17 +108,6 @@ struct tls_descs {
  */
 static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
 
-static void __init xen_banner(void)
-{
-       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       struct xen_extraversion extra;
-       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
-       pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
-       pr_info("Xen version: %d.%d%s (preserve-AD)\n",
-               version >> 16, version & 0xffff, extra.extraversion);
-}
-
 static void __init xen_pv_init_platform(void)
 {
        populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
@@ -142,22 +130,6 @@ static void __init xen_pv_guest_late_init(void)
 #endif
 }
 
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
-       unsigned int version;
-
-       if (!xen_domain())
-               return false;
-
-       version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
-               ((version >> 16) > major))
-               return true;
-       return false;
-}
-
 static __read_mostly unsigned int cpuid_leaf5_ecx_val;
 static __read_mostly unsigned int cpuid_leaf5_edx_val;
 
@@ -1364,7 +1336,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
        boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
 
        if (!xen_initial_domain()) {
-               add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
                x86_platform.set_legacy_features =
@@ -1409,11 +1380,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #endif
        }
 
-       if (!boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
-       add_preferred_console("hvc", 0, NULL);
-       if (boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
+       xen_add_preferred_consoles();
 
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
index 0d5e34b9e6f93985a243cc69c632ae0d8f9e8ce1..bcae606bbc5cfd3145aefb4f3bf2c11b5e07afe0 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/acpi.h>
+#include <linux/export.h>
 
 #include <xen/hvc-console.h>
 
 /*
  * PVH variables.
  *
- * The variable xen_pvh needs to live in the data segment since it is used
+ * The variable xen_pvh needs to live in a data segment since it is used
  * after startup_{32|64} is invoked, which will clear the .bss segment.
  */
-bool xen_pvh __section(".data") = 0;
+bool __ro_after_init xen_pvh;
+EXPORT_SYMBOL_GPL(xen_pvh);
 
 void __init xen_pvh_init(struct boot_params *boot_params)
 {
@@ -36,6 +38,10 @@ void __init xen_pvh_init(struct boot_params *boot_params)
        pfn = __pa(hypercall_page);
        wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
 
+       if (xen_initial_domain())
+               x86_init.oem.arch_setup = xen_add_preferred_consoles;
+       x86_init.oem.banner = xen_banner;
+
        xen_efi_init(boot_params);
 }
 
index 8d751939c6f302690856db16cd39dad62762024f..3359c23573c50167dbc9f55030e5ea7c82f62f54 100644 (file)
@@ -2398,7 +2398,7 @@ static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
 
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages)
+                 unsigned int domid, bool no_translate)
 {
        int err = 0;
        struct remap_data rmd;
index 8d7ec49a35fbb686c5ec93842ce681390039a1e1..8bc8b72a205d460743a76bfac00e88e4921c5015 100644 (file)
@@ -51,6 +51,7 @@ void __init xen_remap_memory(void);
 phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
+void xen_banner(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
@@ -109,7 +110,7 @@ static inline void xen_uninit_lock_cpu(int cpu)
 
 struct dom0_vga_console_info;
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
@@ -118,6 +119,8 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
 }
 #endif
 
+void xen_add_preferred_consoles(void);
+
 void __init xen_init_apic(void);
 
 #ifdef CONFIG_XEN_EFI
index 7cbf68ca71060dec42583ac60cd2e5f47b68ed09..6fc05cba61a27563273cde66d2f99ffe56fd1ab1 100644 (file)
@@ -78,7 +78,7 @@
 #endif
 #define XCHAL_KIO_SIZE                 0x10000000
 
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
 #define XCHAL_KIO_PADDR                        xtensa_get_kio_paddr()
 #ifndef __ASSEMBLY__
 extern unsigned long xtensa_kio_paddr;
index 764b54bef701943c1719f4982773b994684a84a7..15051a8a153998efe65196aecbfbf6dc91af1e02 100644 (file)
@@ -143,7 +143,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
 
 void __init init_IRQ(void)
 {
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        irqchip_init();
 #else
 #ifdef CONFIG_HAVE_SMP
index ed184106e4cf945a0631f7f43d72a700b568507a..ee9082a142feb49cf187c014df9d6df8b605a939 100644 (file)
@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
 extern int initrd_below_start_ok;
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void *dtb_start = __dtb_start;
 #endif
 
@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
 
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static int __init parse_tag_fdt(const bp_tag_t *tag)
 {
@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
 
 __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 static int __init parse_tag_cmdline(const bp_tag_t* tag)
 {
@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
 }
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
                strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 }
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 /*
  * Initialize architecture. (Early stage)
@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
        if (bp_start)
                parse_bootparam(bp_start);
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        early_init_devtree(dtb_start);
 #endif
 
index 7e4d97dc8bd8fb3ed37863a713d7bd1291f8b1f9..38acda4f04e85d5db3d91bb0a0f97819bf6dc7a9 100644 (file)
@@ -101,7 +101,7 @@ void init_mmu(void)
 
 void init_kio(void)
 {
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
        /*
         * Update the IO area mapping in case xtensa_kio_paddr has changed
         */
index 4f7d6142d41fa4c0847b8a68bf82e7067a95fce8..538e6748e85a7dbb912bd7df2e04860ff51dbb03 100644 (file)
@@ -51,8 +51,12 @@ void platform_power_off(void)
 
 void platform_restart(void)
 {
-       /* Flush and reset the mmu, simulate a processor reset, and
-        * jump to the reset vector. */
+       /* Try software reset first. */
+       WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+       /* If software reset did not work, flush and reset the mmu,
+        * simulate a processor reset, and jump to the reset vector.
+        */
        cpu_reset();
        /* control never gets here */
 }
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
 
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static void __init xtfpga_clk_setup(struct device_node *np)
 {
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
  */
 arch_initcall(xtavnet_init);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
index cf2780cb44a74fddb4624aecb22161203b62d6b7..485a258b0ab37ee7cab7b9e28411dd0032f19830 100644 (file)
@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
-       bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
                iput(inode);
                return NULL;
        }
+       bdev->bd_disk = disk;
        return bdev;
 }
 
index e2f14508f2d6eff98274448b68b8139a80872098..85b8e1c3a762d462936e3db1699d236e75087982 100644 (file)
@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));
 
+       if (entity->parent &&
+           entity->parent->last_bfqq_created == bfqq)
+               entity->parent->last_bfqq_created = NULL;
+       else if (bfqd->last_bfqq_created == bfqq)
+               bfqd->last_bfqq_created = NULL;
+
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg  */
index dd13c2bbc29c1164b6d01432b28c13545cad8a90..480e1a13485966104362f265d842887640775781 100644 (file)
@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
         * are likely to increase the throughput.
         */
        bfqq->new_bfqq = new_bfqq;
-       /*
-        * The above assignment schedules the following redirections:
-        * each time some I/O for bfqq arrives, the process that
-        * generated that I/O is disassociated from bfqq and
-        * associated with new_bfqq. Here we increases new_bfqq->ref
-        * in advance, adding the number of processes that are
-        * expected to be associated with new_bfqq as they happen to
-        * issue I/O.
-        */
        new_bfqq->ref += process_refs;
        return new_bfqq;
 }
@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
        struct bfq_queue *in_service_bfqq, *new_bfqq;
 
-       /* if a merge has already been setup, then proceed with that first */
-       if (bfqq->new_bfqq)
-               return bfqq->new_bfqq;
-
        /*
         * Check delayed stable merge for rotational or non-queueing
         * devs. For this branch to be executed, bfqq must not be
@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
        if (bfq_too_late_for_merging(bfqq))
                return NULL;
 
+       if (bfqq->new_bfqq)
+               return bfqq->new_bfqq;
+
        if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
                return NULL;
 
index 38b9f7684952a71607a0c7869fcd885995ed80e9..9a1c5839dd469d58c00a611c5989d07360b81fe6 100644 (file)
@@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
 {
        int rwd = blk_cgroup_io_type(bio), cpu;
        struct blkg_iostat_set *bis;
+       unsigned long flags;
 
        cpu = get_cpu();
        bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
-       u64_stats_update_begin(&bis->sync);
+       flags = u64_stats_update_begin_irqsave(&bis->sync);
 
        /*
         * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
@@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
        }
        bis->cur.ios[rwd]++;
 
-       u64_stats_update_end(&bis->sync);
+       u64_stats_update_end_irqrestore(&bis->sync, flags);
        if (cgroup_subsys_on_dfl(io_cgrp_subsys))
                cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
        put_cpu();
index 5454db2fa263b0d87bce30058bf7052d37f2962d..4d8f5fe915887e1dc17f4ebe58482293945edddd 100644 (file)
@@ -49,7 +49,6 @@
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
 #include "blk-pm.h"
-#include "blk-rq-qos.h"
 
 struct dentry *blk_debugfs_root;
 
@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
 {
-       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
        /*
         * When queue DYING flag is set, we need to block new req
         * entering queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
        blk_freeze_queue_start(q);
-
        if (queue_is_mq(q))
                blk_mq_wake_waiters(q);
-
        /* Make blk_queue_enter() reexamine the DYING flag. */
        wake_up_all(&q->mq_freeze_wq);
 }
+
+void blk_set_queue_dying(struct request_queue *q)
+{
+       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+       blk_queue_start_drain(q);
+}
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
 /**
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_freeze_queue(q);
 
-       rq_qos_exit(q);
-
        blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
-       /* for synchronous bio-based driver finish in-flight integrity i/o */
-       blk_flush_integrity();
-
        blk_sync_queue(q);
        if (queue_is_mq(q))
                blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+       rcu_read_lock();
+       if (!percpu_ref_tryget_live(&q->q_usage_counter))
+               goto fail;
+
+       /*
+        * The code that increments the pm_only counter must ensure that the
+        * counter is globally visible before the queue is unfrozen.
+        */
+       if (blk_queue_pm_only(q) &&
+           (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+               goto fail_put;
+
+       rcu_read_unlock();
+       return true;
+
+fail_put:
+       percpu_ref_put(&q->q_usage_counter);
+fail:
+       rcu_read_unlock();
+       return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
        const bool pm = flags & BLK_MQ_REQ_PM;
 
-       while (true) {
-               bool success = false;
-
-               rcu_read_lock();
-               if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-                       /*
-                        * The code that increments the pm_only counter is
-                        * responsible for ensuring that that counter is
-                        * globally visible before the queue is unfrozen.
-                        */
-                       if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-                           !blk_queue_pm_only(q)) {
-                               success = true;
-                       } else {
-                               percpu_ref_put(&q->q_usage_counter);
-                       }
-               }
-               rcu_read_unlock();
-
-               if (success)
-                       return 0;
-
+       while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
                        return -EBUSY;
 
                /*
-                * read pair of barrier in blk_freeze_queue_start(),
-                * we need to order reading __PERCPU_REF_DEAD flag of
-                * .q_usage_counter and reading .mq_freeze_depth or
-                * queue dying flag, otherwise the following wait may
-                * never return if the two reads are reordered.
+                * read pair of barrier in blk_freeze_queue_start(), we need to
+                * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+                * reading .mq_freeze_depth or queue dying flag, otherwise the
+                * following wait may never return if the two reads are
+                * reordered.
                 */
                smp_rmb();
-
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(pm, q)) ||
@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                if (blk_queue_dying(q))
                        return -ENODEV;
        }
+
+       return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
 {
-       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-       bool nowait = bio->bi_opf & REQ_NOWAIT;
-       int ret;
+       struct gendisk *disk = bio->bi_bdev->bd_disk;
+       struct request_queue *q = disk->queue;
 
-       ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-       if (unlikely(ret)) {
-               if (nowait && !blk_queue_dying(q))
+       while (!blk_try_enter_queue(q, false)) {
+               if (bio->bi_opf & REQ_NOWAIT) {
+                       if (test_bit(GD_DEAD, &disk->state))
+                               goto dead;
                        bio_wouldblock_error(bio);
-               else
-                       bio_io_error(bio);
+                       return -EBUSY;
+               }
+
+               /*
+                * read pair of barrier in blk_freeze_queue_start(), we need to
+                * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+                * reading .mq_freeze_depth or queue dying flag, otherwise the
+                * following wait may never return if the two reads are
+                * reordered.
+                */
+               smp_rmb();
+               wait_event(q->mq_freeze_wq,
+                          (!q->mq_freeze_depth &&
+                           blk_pm_resume_queue(false, q)) ||
+                          test_bit(GD_DEAD, &disk->state));
+               if (test_bit(GD_DEAD, &disk->state))
+                       goto dead;
        }
 
-       return ret;
+       return 0;
+dead:
+       bio_io_error(bio);
+       return -ENODEV;
 }
 
 void blk_queue_exit(struct request_queue *q)
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
        struct gendisk *disk = bio->bi_bdev->bd_disk;
        blk_qc_t ret = BLK_QC_T_NONE;
 
-       if (blk_crypto_bio_prep(&bio)) {
-               if (!disk->fops->submit_bio)
-                       return blk_mq_submit_bio(bio);
+       if (unlikely(bio_queue_enter(bio) != 0))
+               return BLK_QC_T_NONE;
+
+       if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+               goto queue_exit;
+       if (disk->fops->submit_bio) {
                ret = disk->fops->submit_bio(bio);
+               goto queue_exit;
        }
+       return blk_mq_submit_bio(bio);
+
+queue_exit:
        blk_queue_exit(disk->queue);
        return ret;
 }
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
                struct request_queue *q = bio->bi_bdev->bd_disk->queue;
                struct bio_list lower, same;
 
-               if (unlikely(bio_queue_enter(bio) != 0))
-                       continue;
-
                /*
                 * Create a fresh bio_list for all subordinate requests.
                 */
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 {
        struct bio_list bio_list[2] = { };
-       blk_qc_t ret = BLK_QC_T_NONE;
+       blk_qc_t ret;
 
        current->bio_list = bio_list;
 
        do {
-               struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-               if (unlikely(bio_queue_enter(bio) != 0))
-                       continue;
-
-               if (!blk_crypto_bio_prep(&bio)) {
-                       blk_queue_exit(disk->queue);
-                       ret = BLK_QC_T_NONE;
-                       continue;
-               }
-
-               ret = blk_mq_submit_bio(bio);
+               ret = __submit_bio(bio);
        } while ((bio = bio_list_pop(&bio_list[0])));
 
        current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
  */
 blk_qc_t submit_bio_noacct(struct bio *bio)
 {
-       if (!submit_bio_checks(bio))
-               return BLK_QC_T_NONE;
-
        /*
         * We only want one ->submit_bio to be active at a time, else stack
         * usage with stacked devices could be a problem.  Use current->bio_list
index 4b66d2776edac02dd73c16a85e51d629d4f0dd1a..3b38d15723de182d66151cde2d14a9d519a43691 100644 (file)
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(PCI_P2PDMA),
        QUEUE_FLAG_NAME(ZONE_RESETALL),
        QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+       QUEUE_FLAG_NAME(HCTX_ACTIVE),
        QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME
index 108a352051be5fea9c7a5c132576844c3f3ee8a7..bc026372de4394eb003fde3c1b0de7a79df1fdc0 100644 (file)
@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
        mutex_lock(&q->mq_freeze_lock);
+       if (force_atomic)
+               q->q_usage_counter.data->force_atomic = true;
        q->mq_freeze_depth--;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        if (!q->mq_freeze_depth) {
@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        }
        mutex_unlock(&q->mq_freeze_lock);
 }
+
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+       __blk_mq_unfreeze_queue(q, false);
+}
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
 /*
index 7d2a0ba7ed21da27662f712ebdc69905215814fc..6c3c00a8fe19d3f4f926d3deb9f26d63577c6f3c 100644 (file)
@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 void blk_freeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+void blk_queue_start_drain(struct request_queue *q);
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
index 7b6e5e1cf95642adaa6a6fb53e902b6466d1f0f7..b49858550fa6e2fabf50d54bf03630f38e2be304 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/badblocks.h>
 
 #include "blk.h"
+#include "blk-rq-qos.h"
 
 static struct kobject *block_depr;
 
@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk);
  */
 void del_gendisk(struct gendisk *disk)
 {
+       struct request_queue *q = disk->queue;
+
        might_sleep();
 
        if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk)
        fsync_bdev(disk->part0);
        __invalidate_device(disk->part0, true);
 
+       /*
+        * Fail any new I/O.
+        */
+       set_bit(GD_DEAD, &disk->state);
        set_capacity(disk, 0);
 
+       /*
+        * Prevent new I/O from crossing bio_queue_enter().
+        */
+       blk_queue_start_drain(q);
+       blk_mq_freeze_queue_wait(q);
+
+       rq_qos_exit(q);
+       blk_sync_queue(q);
+       blk_flush_integrity();
+       /*
+        * Allow using passthrough request again after the queue is torn down.
+        */
+       blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+       __blk_mq_unfreeze_queue(q, true);
+
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 
@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev)
        struct gendisk *disk = dev_to_disk(dev);
 
        might_sleep();
+       WARN_ON_ONCE(disk_live(disk));
 
        disk_release_events(disk);
        kfree(disk->random);
@@ -1268,6 +1291,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 
 out_destroy_part_tbl:
        xa_destroy(&disk->part_tbl);
+       disk->part0->bd_disk = NULL;
        iput(disk->part0->bd_inode);
 out_free_bdi:
        bdi_put(disk->bdi);
index 15a8be57203d6420df3d73a37736adda1a168019..a0ffbabfac2c61c12139163b595694bb0d8f6493 100644 (file)
@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
 
 struct kyber_queue_data {
        struct request_queue *q;
+       dev_t dev;
 
        /*
         * Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
        }
        memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 
-       trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+       trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
                            kyber_latency_type_names[type], percentile,
                            bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
        depth = clamp(depth, 1U, kyber_depth[sched_domain]);
        if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-               trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+               trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
                                   depth);
        }
 }
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
                goto err;
 
        kqd->q = q;
+       kqd->dev = disk_devt(q->disk);
 
        kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                            GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
-                       trace_kyber_throttled(kqd->q,
+                       trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
-                       trace_kyber_throttled(kqd->q,
+                       trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        }
index 58c4c362c94f9854cd96e7293829e4a9096aeeeb..7bea19dd9458f8d6d7a275ca494a91fe621c57aa 100644 (file)
@@ -423,6 +423,7 @@ out_del:
        device_del(pdev);
 out_put:
        put_device(pdev);
+       return ERR_PTR(err);
 out_put_disk:
        put_disk(disk);
        return ERR_PTR(err);
index 30d2db37cc8735195bae14633cc08f9b94f1ea76..0d399ddaa185a7cdf5974f7f294f50032747f577 100644 (file)
@@ -17,6 +17,8 @@ source "drivers/bus/Kconfig"
 
 source "drivers/connector/Kconfig"
 
+source "drivers/firmware/Kconfig"
+
 source "drivers/gnss/Kconfig"
 
 source "drivers/mtd/Kconfig"
index 0a0a982f9c28d44e5cf48b170662c0df40852272..c0e77c1c8e09d6fa15094b2fff28d1c7bef0aed4 100644 (file)
@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {
 
 static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
 
-static inline void *next_platform_timer(void *platform_timer)
+static inline __init void *next_platform_timer(void *platform_timer)
 {
        struct acpi_gtdt_header *gh = platform_timer;
 
index a3ef6cce644cc071e5d58e5698cc1bf52947b6df..7dd80acf92c78c08870ff26c035d46b64cfff5cc 100644 (file)
@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                ndr_desc->target_node = NUMA_NO_NODE;
        }
 
+       /* Fallback to address based numa information if node lookup failed */
+       if (ndr_desc->numa_node == NUMA_NO_NODE) {
+               ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+               dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+                       NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+       }
+       if (ndr_desc->target_node == NUMA_NO_NODE) {
+               ndr_desc->target_node = phys_to_target_node(spa->address);
+               dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+                       NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+       }
+
        /*
         * Persistence domain bits are hierarchical, if
         * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
index b9863e22b952d39f4397a8e5da90b5a756a5dd46..f0ed4414edb1fc75f7959bbe38529906aba33d2c 100644 (file)
@@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
        list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
                mutex_lock(&resource->resource_lock);
 
-               /*
-                * Turn off power resources in an unknown state too, because the
-                * platform firmware on some system expects the OS to turn off
-                * power resources without any users unconditionally.
-                */
                if (!resource->ref_count &&
-                   resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+                   resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
                        acpi_handle_debug(resource->device.handle, "Turning OFF\n");
                        __acpi_power_off(resource);
                }
index f9383736fa0feb0bb3037eda7fae30116bbe7152..71419eb16e09fab1fe18e7cb6d46fc2930d0763d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/earlycpio.h>
 #include <linux/initrd.h>
 #include <linux/security.h>
+#include <linux/kmemleak.h>
 #include "internal.h"
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
         */
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
+       kmemleak_ignore_phys(acpi_tables_addr);
+
        /*
         * early_ioremap only can remap 256k one time. If we map all
         * tables one time, we will hit the limit. Need to map chunks
index bd92b549fd5a4cae9f96f2cc84b9337ab976daa0..1c48358b43ba30306a84894bfadb4444e7896176 100644 (file)
@@ -371,7 +371,7 @@ static int lps0_device_attach(struct acpi_device *adev,
                return 0;
 
        if (acpi_s2idle_vendor_amd()) {
-               /* AMD0004, AMDI0005:
+               /* AMD0004, AMD0005, AMDI0005:
                 * - Should use rev_id 0x0
                 * - function mask > 0x3: Should use AMD method, but has off by one bug
                 * - function mask = 0x3: Should use Microsoft method
@@ -390,6 +390,7 @@ static int lps0_device_attach(struct acpi_device *adev,
                                        ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
                                        &lps0_dsm_guid_microsoft);
                if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+                                                !strcmp(hid, "AMD0005") ||
                                                 !strcmp(hid, "AMDI0005"))) {
                        lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
                        acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
index b2f55208829187d5234b505e1bdc28c8c0c5fdd8..0910441321f729bc9ea449c326e83c776c3906df 100644 (file)
@@ -440,10 +440,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
        hpriv->phy_regulator = devm_regulator_get(dev, "phy");
        if (IS_ERR(hpriv->phy_regulator)) {
                rc = PTR_ERR(hpriv->phy_regulator);
-               if (rc == -EPROBE_DEFER)
-                       goto err_out;
-               rc = 0;
-               hpriv->phy_regulator = NULL;
+               goto err_out;
        }
 
        if (flags & AHCI_PLATFORM_GET_RESETS) {
index c3e6592712c4b8ef3765848ef363e1fe74057845..0a8bf09a5c19e5887ae463e0a581a04e366e4dd2 100644 (file)
@@ -352,7 +352,8 @@ static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
                        iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 
                if (unlikely(slop)) {
-                       __le32 pad;
+                       __le32 pad = 0;
+
                        if (rw == READ) {
                                pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
                                memcpy(buf + buflen - slop, &pad, slop);
@@ -742,7 +743,8 @@ static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
                        ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 
                if (unlikely(slop)) {
-                       __le32 pad;
+                       __le32 pad = 0;
+
                        if (rw == WRITE) {
                                memcpy(&pad, buf + buflen - slop, slop);
                                iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
index 9d86203e1e7a10af58e2b6cc2c6c410437260aa2..c53633d47bfb31627c085b71c03bbb7268010bda 100644 (file)
@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
                break;
 
        default:
-               dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
-               return 1;
+               dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
+               return -EINVAL;
        }
 
        hpriv->hp_flags = hp_flags;
index e65dd803a453cae9aab09c62685422c53e4b64b8..249da496581a0dac798502d32dc8ebf016a7c63c 100644 (file)
@@ -95,12 +95,29 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
 
        list_add(&link->s_hook, &sup->consumers);
        list_add(&link->c_hook, &con->suppliers);
+       pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+                con, sup);
 out:
        mutex_unlock(&fwnode_link_lock);
 
        return ret;
 }
 
+/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+       pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+                link->consumer, link->supplier);
+       list_del(&link->s_hook);
+       list_del(&link->c_hook);
+       kfree(link);
+}
+
 /**
  * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
  * @fwnode: fwnode whose supplier links need to be deleted
@@ -112,11 +129,8 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
        struct fwnode_link *link, *tmp;
 
        mutex_lock(&fwnode_link_lock);
-       list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
-       }
+       list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+               __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
 }
 
@@ -131,11 +145,8 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
        struct fwnode_link *link, *tmp;
 
        mutex_lock(&fwnode_link_lock);
-       list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
-       }
+       list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+               __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
 }
 
@@ -676,7 +687,8 @@ struct device_link *device_link_add(struct device *consumer,
 {
        struct device_link *link;
 
-       if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+       if (!consumer || !supplier || consumer == supplier ||
+           flags & ~DL_ADD_VALID_FLAGS ||
            (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
            (flags & DL_FLAG_SYNC_STATE_ONLY &&
             (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
@@ -975,6 +987,7 @@ int device_links_check_suppliers(struct device *dev)
 {
        struct device_link *link;
        int ret = 0;
+       struct fwnode_handle *sup_fw;
 
        /*
         * Device waiting for supplier to become available is not allowed to
@@ -983,10 +996,11 @@ int device_links_check_suppliers(struct device *dev)
        mutex_lock(&fwnode_link_lock);
        if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
            !fw_devlink_is_permissive()) {
-               dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
-                       list_first_entry(&dev->fwnode->suppliers,
-                       struct fwnode_link,
-                       c_hook)->supplier);
+               sup_fw = list_first_entry(&dev->fwnode->suppliers,
+                                         struct fwnode_link,
+                                         c_hook)->supplier;
+               dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
+                             sup_fw);
                mutex_unlock(&fwnode_link_lock);
                return -EPROBE_DEFER;
        }
@@ -1001,8 +1015,9 @@ int device_links_check_suppliers(struct device *dev)
                if (link->status != DL_STATE_AVAILABLE &&
                    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
                        device_links_missing_supplier(dev);
-                       dev_dbg(dev, "probe deferral - supplier %s not ready\n",
-                               dev_name(link->supplier));
+                       dev_err_probe(dev, -EPROBE_DEFER,
+                                     "supplier %s not ready\n",
+                                     dev_name(link->supplier));
                        ret = -EPROBE_DEFER;
                        break;
                }
@@ -1722,6 +1737,25 @@ static int fw_devlink_create_devlink(struct device *con,
        struct device *sup_dev;
        int ret = 0;
 
+       /*
+        * In some cases, a device P might also be a supplier to its child node
+        * C. However, this would defer the probe of C until the probe of P
+        * completes successfully. This is perfectly fine in the device driver
+        * model. device_add() doesn't guarantee probe completion of the device
+        * by the time it returns.
+        *
+        * However, there are a few drivers that assume C will finish probing
+        * as soon as it's added and before P finishes probing. So, we provide
+        * a flag to let fw_devlink know not to delay the probe of C until the
+        * probe of P completes successfully.
+        *
+        * When such a flag is set, we can't create device links where P is the
+        * supplier of C as that would delay the probe of C.
+        */
+       if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+           fwnode_is_ancestor_of(sup_handle, con->fwnode))
+               return -EINVAL;
+
        sup_dev = get_dev_from_fwnode(sup_handle);
        if (sup_dev) {
                /*
@@ -1772,14 +1806,21 @@ static int fw_devlink_create_devlink(struct device *con,
         * be broken by applying logic. Check for these types of cycles and
         * break them so that devices in the cycle probe properly.
         *
-        * If the supplier's parent is dependent on the consumer, then
-        * the consumer-supplier dependency is a false dependency. So,
-        * treat it as an invalid link.
+        * If the supplier's parent is dependent on the consumer, then the
+        * consumer and supplier have a cyclic dependency. Since fw_devlink
+        * can't tell which of the inferred dependencies are incorrect, don't
+        * enforce probe ordering between any of the devices in this cyclic
+        * dependency. Do this by relaxing all the fw_devlink device links in
+        * this cycle and by treating the fwnode link between the consumer and
+        * the supplier as an invalid dependency.
         */
        sup_dev = fwnode_get_next_parent_dev(sup_handle);
        if (sup_dev && device_is_dependent(con, sup_dev)) {
-               dev_dbg(con, "Not linking to %pfwP - False link\n",
-                       sup_handle);
+               dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+                        sup_handle, dev_name(sup_dev));
+               device_links_write_lock();
+               fw_devlink_relax_cycle(con, sup_dev);
+               device_links_write_unlock();
                ret = -EINVAL;
        } else {
                /*
@@ -1858,9 +1899,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
                if (!own_link || ret == -EAGAIN)
                        continue;
 
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
+               __fwnode_link_del(link);
        }
 }
 
@@ -1912,9 +1951,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
                if (!own_link || ret == -EAGAIN)
                        continue;
 
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
+               __fwnode_link_del(link);
 
                /* If no device link was created, nothing more to do. */
                if (ret)
index cfa29dc89bbff456a366ff6057a57a22e46dcf19..fabf87058d80bc0610f2e7cff25b29c91821cb56 100644 (file)
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (!blk)
                return -ENOMEM;
 
+       rbnode->block = blk;
+
        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
                present = krealloc(rbnode->cache_present,
                                   BITS_TO_LONGS(blklen) * sizeof(*present),
                                   GFP_KERNEL);
-               if (!present) {
-                       kfree(blk);
+               if (!present)
                        return -ENOMEM;
-               }
 
                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        }
 
        /* update the rbnode block, its size and the base register */
-       rbnode->block = blk;
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = present;
index 64b2f3d744d51aa91d98d64012adb8fce6bae54b..7f76fee6f989d85d64bd29bb894003b0d5a2509b 100644 (file)
@@ -2,4 +2,4 @@
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)  += test_async_driver_probe.o
 
 obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index 58ec167aa01837b825e9280cf06edfdcff893f0c..530b3124020312099044a64a3545033a31d774c9 100644 (file)
@@ -373,10 +373,22 @@ static int brd_alloc(int i)
        struct gendisk *disk;
        char buf[DISK_NAME_LEN];
 
+       mutex_lock(&brd_devices_mutex);
+       list_for_each_entry(brd, &brd_devices, brd_list) {
+               if (brd->brd_number == i) {
+                       mutex_unlock(&brd_devices_mutex);
+                       return -EEXIST;
+               }
+       }
        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-       if (!brd)
+       if (!brd) {
+               mutex_unlock(&brd_devices_mutex);
                return -ENOMEM;
+       }
        brd->brd_number         = i;
+       list_add_tail(&brd->brd_list, &brd_devices);
+       mutex_unlock(&brd_devices_mutex);
+
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
 
@@ -411,37 +423,30 @@ static int brd_alloc(int i)
        blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
        add_disk(disk);
-       list_add_tail(&brd->brd_list, &brd_devices);
 
        return 0;
 
 out_free_dev:
+       mutex_lock(&brd_devices_mutex);
+       list_del(&brd->brd_list);
+       mutex_unlock(&brd_devices_mutex);
        kfree(brd);
        return -ENOMEM;
 }
 
 static void brd_probe(dev_t dev)
 {
-       int i = MINOR(dev) / max_part;
-       struct brd_device *brd;
-
-       mutex_lock(&brd_devices_mutex);
-       list_for_each_entry(brd, &brd_devices, brd_list) {
-               if (brd->brd_number == i)
-                       goto out_unlock;
-       }
-
-       brd_alloc(i);
-out_unlock:
-       mutex_unlock(&brd_devices_mutex);
+       brd_alloc(MINOR(dev) / max_part);
 }
 
 static void brd_del_one(struct brd_device *brd)
 {
-       list_del(&brd->brd_list);
        del_gendisk(brd->brd_disk);
        blk_cleanup_disk(brd->brd_disk);
        brd_free_pages(brd);
+       mutex_lock(&brd_devices_mutex);
+       list_del(&brd->brd_list);
+       mutex_unlock(&brd_devices_mutex);
        kfree(brd);
 }
 
@@ -491,25 +496,21 @@ static int __init brd_init(void)
 
        brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
 
-       mutex_lock(&brd_devices_mutex);
        for (i = 0; i < rd_nr; i++) {
                err = brd_alloc(i);
                if (err)
                        goto out_free;
        }
 
-       mutex_unlock(&brd_devices_mutex);
-
        pr_info("brd: module loaded\n");
        return 0;
 
 out_free:
+       unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
        debugfs_remove_recursive(brd_debugfs_dir);
 
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);
-       mutex_unlock(&brd_devices_mutex);
-       unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 
        pr_info("brd: module NOT loaded !!!\n");
        return err;
@@ -519,13 +520,12 @@ static void __exit brd_exit(void)
 {
        struct brd_device *brd, *next;
 
+       unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
        debugfs_remove_recursive(brd_debugfs_dir);
 
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);
 
-       unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-
        pr_info("brd: module unloaded\n");
 }
 
index 5170a630778dc08ef4d4d16680693def45a57f61..1183f7872b7134f154d8a6f6827e3b4e1fcffb90 100644 (file)
@@ -97,13 +97,18 @@ struct nbd_config {
 
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
-       loff_t blksize;
+       unsigned int blksize_bits;
        loff_t bytesize;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
 #endif
 };
 
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+       return 1u << config->blksize_bits;
+}
+
 struct nbd_device {
        struct blk_mq_tag_set tag_set;
 
@@ -146,7 +151,7 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
@@ -317,12 +322,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
                loff_t blksize)
 {
        if (!blksize)
-               blksize = NBD_DEF_BLKSIZE;
+               blksize = 1u << NBD_DEF_BLKSIZE_BITS;
        if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
                return -EINVAL;
 
        nbd->config->bytesize = bytesize;
-       nbd->config->blksize = blksize;
+       nbd->config->blksize_bits = __ffs(blksize);
 
        if (!nbd->task_recv)
                return 0;
@@ -1337,7 +1342,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                args->index = i;
                queue_work(nbd->recv_workq, &args->work);
        }
-       return nbd_set_size(nbd, config->bytesize, config->blksize);
+       return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
 }
 
 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1406,11 +1411,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_BLKSIZE:
                return nbd_set_size(nbd, config->bytesize, arg);
        case NBD_SET_SIZE:
-               return nbd_set_size(nbd, arg, config->blksize);
+               return nbd_set_size(nbd, arg, nbd_blksize(config));
        case NBD_SET_SIZE_BLOCKS:
-               if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+               if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
                        return -EINVAL;
-               return nbd_set_size(nbd, bytesize, config->blksize);
+               return nbd_set_size(nbd, bytesize, nbd_blksize(config));
        case NBD_SET_TIMEOUT:
                nbd_set_cmd_timeout(nbd, arg);
                return 0;
@@ -1476,7 +1481,7 @@ static struct nbd_config *nbd_alloc_config(void)
        atomic_set(&config->recv_threads, 0);
        init_waitqueue_head(&config->recv_wq);
        init_waitqueue_head(&config->conn_wait);
-       config->blksize = NBD_DEF_BLKSIZE;
+       config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
        atomic_set(&config->live_connections, 0);
        try_module_get(THIS_MODULE);
        return config;
@@ -1604,7 +1609,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
        debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-       debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+       debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
 
        return 0;
@@ -1826,7 +1831,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 {
        struct nbd_config *config = nbd->config;
-       u64 bsize = config->blksize;
+       u64 bsize = nbd_blksize(config);
        u64 bytes = config->bytesize;
 
        if (info->attrs[NBD_ATTR_SIZE_BYTES])
@@ -1835,7 +1840,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
        if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
                bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
 
-       if (bytes != config->bytesize || bsize != config->blksize)
+       if (bytes != config->bytesize || bsize != nbd_blksize(config))
                return nbd_set_size(nbd, bytes, bsize);
        return 0;
 }
index 4b93fd83bf79b126cd261e1fee0ff98650971683..44e45af00e8385f1a01dace454bf639e18fb742c 100644 (file)
@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
        int opt_mask = 0;
        int token;
        int ret = -EINVAL;
-       int i, dest_port, nr_poll_queues;
+       int nr_poll_queues = 0;
+       int dest_port = 0;
        int p_cnt = 0;
+       int i;
 
        options = kstrdup(buf, GFP_KERNEL);
        if (!options)
index 9b3bd083b411fc0810023b48d7e34cfd5e5ee91a..303caf2d17d0c9047e532a62569a0af15400c9e1 100644 (file)
@@ -689,28 +689,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
-static int virtblk_validate(struct virtio_device *vdev)
-{
-       u32 blk_size;
-
-       if (!vdev->config->get) {
-               dev_err(&vdev->dev, "%s failure: config access disabled\n",
-                       __func__);
-               return -EINVAL;
-       }
-
-       if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
-               return 0;
-
-       blk_size = virtio_cread32(vdev,
-                       offsetof(struct virtio_blk_config, blk_size));
-
-       if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
-               __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
-
-       return 0;
-}
-
 static int virtblk_probe(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk;
@@ -722,6 +700,12 @@ static int virtblk_probe(struct virtio_device *vdev)
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
@@ -836,14 +820,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        else
                blk_size = queue_logical_block_size(q);
 
-       if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
-               dev_err(&vdev->dev,
-                       "block size is changed unexpectedly, now is %u\n",
-                       blk_size);
-               err = -EINVAL;
-               goto out_cleanup_disk;
-       }
-
        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
@@ -1009,7 +985,6 @@ static struct virtio_driver virtio_blk = {
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
-       .validate                       = virtblk_validate,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
index a5b96f3aad677be64b8313744612fe2469150aa0..a4cf3d692dc30137023e8d2b361e804cc593b505 100644 (file)
@@ -152,18 +152,6 @@ config QCOM_EBI2
          Interface 2, which can be used to connect things like NAND Flash,
          SRAM, ethernet adapters, FPGAs and LCD displays.
 
-config SIMPLE_PM_BUS
-       tristate "Simple Power-Managed Bus Driver"
-       depends on OF && PM
-       help
-         Driver for transparent busses that don't need a real driver, but
-         where the bus controller is part of a PM domain, or under the control
-         of a functional clock, and thus relies on runtime PM for managing
-         this PM domain and/or clock.
-         An example of such a bus controller is the Renesas Bus State
-         Controller (BSC, sometimes called "LBSC within Bus Bridge", or
-         "External Bus Interface") as found on several Renesas ARM SoCs.
-
 config SUN50I_DE2_BUS
        bool "Allwinner A64 DE2 Bus Driver"
          default ARM64
index 1c29c5e8ffb83ff08576c0018923dc11de1c4095..52c2f35a26a99d587e11f9751bbaf341be703dc6 100644 (file)
@@ -27,7 +27,7 @@ obj-$(CONFIG_OMAP_OCP2SCP)    += omap-ocp2scp.o
 obj-$(CONFIG_QCOM_EBI2)                += qcom-ebi2.o
 obj-$(CONFIG_SUN50I_DE2_BUS)   += sun50i-de2.o
 obj-$(CONFIG_SUNXI_RSB)                += sunxi-rsb.o
-obj-$(CONFIG_SIMPLE_PM_BUS)    += simple-pm-bus.o
+obj-$(CONFIG_OF)               += simple-pm-bus.o
 obj-$(CONFIG_TEGRA_ACONNECT)   += tegra-aconnect.o
 obj-$(CONFIG_TEGRA_GMI)                += tegra-gmi.o
 obj-$(CONFIG_TI_PWMSS)         += ti-pwmss.o
index 01a3d0cd08edc7271215e0e61c5a1d5497ac33ac..6b8d6257ed8a49cbddce1b3c5ba03588ddea8654 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
-
 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
-       const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
-       struct device_node *np = pdev->dev.of_node;
+       const struct device *dev = &pdev->dev;
+       const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+       struct device_node *np = dev->of_node;
+       const struct of_device_id *match;
+
+       /*
+        * Allow user to use driver_override to bind this driver to a
+        * transparent bus device which has a different compatible string
+        * that's not listed in simple_pm_bus_of_match. We don't want to do any
+        * of the simple-pm-bus tasks for these devices, so return early.
+        */
+       if (pdev->driver_override)
+               return 0;
+
+       match = of_match_device(dev->driver->of_match_table, dev);
+       /*
+        * These are transparent bus devices (not simple-pm-bus matches) that
+        * have their child nodes populated automatically.  So, don't need to
+        * do anything more. We only match with the device if this driver is
+        * the most specific match because we don't want to incorrectly bind to
+        * a device that has a more specific driver.
+        */
+       if (match && match->data) {
+               if (of_property_match_string(np, "compatible", match->compatible) == 0)
+                       return 0;
+               else
+                       return -ENODEV;
+       }
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
@@ -31,14 +56,25 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
 
 static int simple_pm_bus_remove(struct platform_device *pdev)
 {
+       const void *data = of_device_get_match_data(&pdev->dev);
+
+       if (pdev->driver_override || data)
+               return 0;
+
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
 
+#define ONLY_BUS       ((void *) 1) /* Match if the device is only a bus. */
+
 static const struct of_device_id simple_pm_bus_of_match[] = {
        { .compatible = "simple-pm-bus", },
+       { .compatible = "simple-bus",   .data = ONLY_BUS },
+       { .compatible = "simple-mfd",   .data = ONLY_BUS },
+       { .compatible = "isa",          .data = ONLY_BUS },
+       { .compatible = "arm,amba-bus", .data = ONLY_BUS },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
index a51c2a8feed90c52eec79a80bc4922edd6259ece..6a8b7fb5be58df3aeb0270854aace22c2b956899 100644 (file)
@@ -1464,6 +1464,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        /* Quirks that need to be set based on detected module */
        SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
                   SYSC_MODULE_QUIRK_AESS),
+       /* Errata i893 handling for dra7 dcan1 and 2 */
+       SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+                  SYSC_QUIRK_CLKDM_NOAUTO),
        SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
                   SYSC_QUIRK_CLKDM_NOAUTO),
        SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
@@ -2954,6 +2957,7 @@ static int sysc_init_soc(struct sysc *ddata)
                        break;
                case SOC_AM3:
                        sysc_add_disabled(0x48310000);  /* rng */
+                       break;
                default:
                        break;
                }
index 0a5596797b9346ca1bccc7d4ecfe17797cc001ca..9ef007b3cf9b41689ac9799c41e4fbb2f238ff4a 100644 (file)
@@ -564,6 +564,7 @@ config SM_GCC_6125
 
 config SM_GCC_6350
        tristate "SM6350 Global Clock Controller"
+       select QCOM_GDSC
        help
          Support for the global clock controller on SM6350 devices.
          Say Y if you want to use peripheral devices such as UART,
index bc09736ece76c9b883ad1e66f4b682c914ef1d06..68fe9f6f0d2f3bc8428e33357d3cdaa09355307d 100644 (file)
@@ -3242,7 +3242,7 @@ static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
 };
 
 static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
-       .gdscr = 0x7d060,
+       .gdscr = 0x7d07c,
        .pd = {
                .name = "hlos1_vote_turing_mmu_tbu0",
        },
index 4c94b94c41253a3fafa58674d5b87e8df4e9be94..1490446985e2e887abe47ebd42f2afc083844792 100644 (file)
@@ -186,6 +186,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
 
 static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
        MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
+       MOD_CLK_BASE + R9A07G044_IA55_CLK,
+       MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
 };
 
 const struct rzg2l_cpg_info r9a07g044_cpg_info = {
index 3b3b2c3347f3763fb0cdd04392f23a4d4add426c..761922ea5db7616129ddd44b9b8644da66662900 100644 (file)
@@ -391,7 +391,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
 
        value = readl(priv->base + CLK_MON_R(clock->off));
 
-       return !(value & bitmask);
+       return value & bitmask;
 }
 
 static const struct clk_ops rzg2l_mod_clock_ops = {
index 242e94c0cf8a387f267d6cfcf543a58395c48d94..bf8cd928c2283de16b85c56ec3adef3c8f36b918 100644 (file)
@@ -165,13 +165,6 @@ static const struct clk_parent_data mpu_mux[] = {
          .name = "boot_clk", },
 };
 
-static const struct clk_parent_data s2f_usr0_mux[] = {
-       { .fw_name = "f2s-free-clk",
-         .name = "f2s-free-clk", },
-       { .fw_name = "boot_clk",
-         .name = "boot_clk", },
-};
-
 static const struct clk_parent_data emac_mux[] = {
        { .fw_name = "emaca_free_clk",
          .name = "emaca_free_clk", },
@@ -312,8 +305,6 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
          4, 0x44, 28, 1, 0, 0, 0},
        { AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
          5, 0, 0, 0, 0x30, 1, 0},
-       { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
-         6, 0, 0, 0, 0, 0, 0},
        { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
          0, 0, 0, 0, 0x94, 26, 0},
        { AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
index bb88198c874e0ed2b8ed55a03fb93a9d9bef0aa0..aa4e1a5006919da78f1e5392c686479da2d579b9 100644 (file)
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                                    in_place ? DMA_BIDIRECTIONAL
                                             : DMA_TO_DEVICE);
                if (ret)
-                       goto e_ctx;
+                       goto e_aad;
 
                if (in_place) {
                        dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
        op.u.aes.size = 0;
        ret = cmd_q->ccp->vdata->perform->aes(&op);
        if (ret)
-               goto e_dst;
+               goto e_final_wa;
 
        if (aes->action == CCP_AES_ACTION_ENCRYPT) {
                /* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
                                           DMA_BIDIRECTIONAL);
                if (ret)
-                       goto e_tag;
+                       goto e_final_wa;
                ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-               if (ret)
-                       goto e_tag;
+               if (ret) {
+                       ccp_dm_free(&tag);
+                       goto e_final_wa;
+               }
 
                ret = crypto_memneq(tag.address, final_wa.address,
                                    authsize) ? -EBADMSG : 0;
                ccp_dm_free(&tag);
        }
 
-e_tag:
+e_final_wa:
        ccp_dm_free(&final_wa);
 
 e_dst:
index e3e757513d1bc38eee01eb0c6836ecb8a63bd9a3..b1f46a974b9e0001b5333ae8c00fa49b4eafb271 100644 (file)
@@ -178,7 +178,7 @@ static void axp_mc_check(struct mem_ctl_info *mci)
                                     "details unavailable (multiple errors)");
        if (cnt_dbe)
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
-                                    cnt_sbe, /* error count */
+                                    cnt_dbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
index 220a58cf0a441ca2be1e5f9dc5480e06b90fd5bb..cda7d7162cbbd0dbc1ce6222227c0488f5ff7adc 100644 (file)
@@ -203,10 +203,7 @@ config INTEL_STRATIX10_RSU
          Say Y here if you want Intel RSU support.
 
 config QCOM_SCM
-       tristate "Qcom SCM driver"
-       depends on ARM || ARM64
-       depends on HAVE_ARM_SMCCC
-       select RESET_CONTROLLER
+       tristate
 
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
        bool "Qualcomm download mode enabled by default"
index 00fe595a5bc8972ffc90363cda045b234d96dc01..641a91819088050759f995bbf346449a28f6a53b 100644 (file)
@@ -49,6 +49,13 @@ static int ffa_device_probe(struct device *dev)
        return ffa_drv->probe(ffa_dev);
 }
 
+static void ffa_device_remove(struct device *dev)
+{
+       struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+
+       ffa_drv->remove(to_ffa_dev(dev));
+}
+
 static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
        struct ffa_device *ffa_dev = to_ffa_dev(dev);
@@ -86,6 +93,7 @@ struct bus_type ffa_bus_type = {
        .name           = "arm_ffa",
        .match          = ffa_device_match,
        .probe          = ffa_device_probe,
+       .remove         = ffa_device_remove,
        .uevent         = ffa_device_uevent,
        .dev_groups     = ffa_device_attributes_groups,
 };
@@ -127,7 +135,7 @@ static void ffa_release_device(struct device *dev)
 
 static int __ffa_devices_unregister(struct device *dev, void *data)
 {
-       ffa_release_device(dev);
+       device_unregister(dev);
 
        return 0;
 }
index 7f4d2435503b8ad2e650bce50c0cfc8e1cf616b9..3d7081e848536a5b587f0ad2254d1a692fc7c47a 100644 (file)
@@ -68,7 +68,7 @@ config ARM_SCMI_TRANSPORT_SMC
 
 config ARM_SCMI_TRANSPORT_VIRTIO
        bool "SCMI transport based on VirtIO"
-       depends on VIRTIO
+       depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
        select ARM_SCMI_HAVE_TRANSPORT
        select ARM_SCMI_HAVE_MSG
        help
index 224577f869286c75ae2a277b738c2613b79ecfa1..11e8efb7137512fb292bb6055501829bec0cc881 100644 (file)
@@ -110,18 +110,16 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
        if (vioch->is_rx) {
                scmi_vio_feed_vq_rx(vioch, msg);
        } else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&vioch->lock, flags);
+               /* Here IRQs are assumed to be already disabled by the caller */
+               spin_lock(&vioch->lock);
                list_add(&msg->list, &vioch->free_list);
-               spin_unlock_irqrestore(&vioch->lock, flags);
+               spin_unlock(&vioch->lock);
        }
 }
 
 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 {
        unsigned long ready_flags;
-       unsigned long flags;
        unsigned int length;
        struct scmi_vio_channel *vioch;
        struct scmi_vio_msg *msg;
@@ -140,7 +138,8 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                        goto unlock_ready_out;
                }
 
-               spin_lock_irqsave(&vioch->lock, flags);
+               /* IRQs already disabled here no need to irqsave */
+               spin_lock(&vioch->lock);
                if (cb_enabled) {
                        virtqueue_disable_cb(vqueue);
                        cb_enabled = false;
@@ -151,7 +150,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                                goto unlock_out;
                        cb_enabled = true;
                }
-               spin_unlock_irqrestore(&vioch->lock, flags);
+               spin_unlock(&vioch->lock);
 
                if (msg) {
                        msg->rx_len = length;
@@ -161,11 +160,18 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                        scmi_finalize_message(vioch, msg);
                }
 
+               /*
+                * Release ready_lock and re-enable IRQs between loop iterations
+                * to allow virtio_chan_free() to possibly kick in and set the
+                * flag vioch->ready to false even in between processing of
+                * messages, so as to force outstanding messages to be ignored
+                * when system is shutting down.
+                */
                spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
        }
 
 unlock_out:
-       spin_unlock_irqrestore(&vioch->lock, flags);
+       spin_unlock(&vioch->lock);
 unlock_ready_out:
        spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 }
@@ -384,8 +390,11 @@ static int scmi_vio_probe(struct virtio_device *vdev)
        struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
 
        /* Only one SCMI VirtiO device allowed */
-       if (scmi_vdev)
-               return -EINVAL;
+       if (scmi_vdev) {
+               dev_err(dev,
+                       "One SCMI Virtio device was already initialized: only one allowed.\n");
+               return -EBUSY;
+       }
 
        have_vq_rx = scmi_vio_have_vq_rx(vdev);
        vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
@@ -428,16 +437,25 @@ static int scmi_vio_probe(struct virtio_device *vdev)
        }
 
        vdev->priv = channels;
-       scmi_vdev = vdev;
+       /* Ensure initialized scmi_vdev is visible */
+       smp_store_mb(scmi_vdev, vdev);
 
        return 0;
 }
 
 static void scmi_vio_remove(struct virtio_device *vdev)
 {
+       /*
+        * Once we get here, virtio_chan_free() will have already been called by
+        * the SCMI core for any existing channel and, as a consequence, all the
+        * virtio channels will have been already marked NOT ready, causing any
+        * outstanding message on any vqueue to be ignored by complete_cb: now
+        * we can just stop processing buffers and destroy the vqueues.
+        */
        vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);
-       scmi_vdev = NULL;
+       /* Ensure scmi_vdev is visible as NULL */
+       smp_store_mb(scmi_vdev, NULL);
 }
 
 static int scmi_vio_validate(struct virtio_device *vdev)
@@ -476,7 +494,7 @@ static int __init virtio_scmi_init(void)
        return register_virtio_driver(&virtio_scmi_driver);
 }
 
-static void __exit virtio_scmi_exit(void)
+static void virtio_scmi_exit(void)
 {
        unregister_virtio_driver(&virtio_scmi_driver);
 }
index 73bdbd207e7aad433476b129d403725cf2c79b6b..6ec8edec63296b435389531bbc567d8703125201 100644 (file)
@@ -25,8 +25,6 @@
 #include <acpi/ghes.h>
 #include <ras/ras_event.h>
 
-static char rcd_decode_str[CPER_REC_LEN];
-
 /*
  * CPER record ID need to be unique even after reboot, because record
  * ID is used as index for ERST storage, while CPER records from
@@ -312,6 +310,7 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
                                struct cper_mem_err_compact *cmem)
 {
        const char *ret = trace_seq_buffer_ptr(p);
+       char rcd_decode_str[CPER_REC_LEN];
 
        if (cper_mem_err_location(cmem, rcd_decode_str))
                trace_seq_printf(p, "%s", rcd_decode_str);
@@ -326,6 +325,7 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
        int len)
 {
        struct cper_mem_err_compact cmem;
+       char rcd_decode_str[CPER_REC_LEN];
 
        /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
        if (len == sizeof(struct cper_sec_mem_err_old) &&
index 365c3a43a1982fa3cdd6f03187052078024d1b5d..fe567be0f118beb18ec061850e17b3c8ae77945a 100644 (file)
@@ -271,7 +271,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
                return status;
        }
 
-       efi_info("Exiting boot services and installing virtual address map...\n");
+       efi_info("Exiting boot services...\n");
 
        map.map = &memory_map;
        status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
index 1410beaef5c30404e3cb1503b3e7b3c509d36674..f3e54f6616f02475b95afb593fdc1c29a4eef449 100644 (file)
@@ -414,7 +414,7 @@ static void virt_efi_reset_system(int reset_type,
                                  unsigned long data_size,
                                  efi_char16_t *data)
 {
-       if (down_interruptible(&efi_runtime_lock)) {
+       if (down_trylock(&efi_runtime_lock)) {
                pr_warn("failed to invoke the reset_system() runtime service:\n"
                        "could not get exclusive access to the firmware\n");
                return;
index 69dec5af23c366f7acb2e6198b2357d0e3316acb..029d3cdb918d12e83e332ae3d2ec7096ee201324 100644 (file)
@@ -192,12 +192,19 @@ static const struct of_device_id ice40_fpga_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
 
+static const struct spi_device_id ice40_fpga_spi_ids[] = {
+       { .name = "ice40-fpga-mgr", },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids);
+
 static struct spi_driver ice40_fpga_driver = {
        .probe = ice40_fpga_probe,
        .driver = {
                .name = "ice40spi",
                .of_match_table = of_match_ptr(ice40_fpga_of_match),
        },
+       .id_table = ice40_fpga_spi_ids,
 };
 
 module_spi_driver(ice40_fpga_driver);
index 05637d58515269b75c5a279291744ca2b6ad2481..4a55cdf089d62a33a793b4eb402ff08a19df720f 100644 (file)
@@ -174,6 +174,13 @@ static int gen_74x164_remove(struct spi_device *spi)
        return 0;
 }
 
+static const struct spi_device_id gen_74x164_spi_ids[] = {
+       { .name = "74hc595" },
+       { .name = "74lvc594" },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, gen_74x164_spi_ids);
+
 static const struct of_device_id gen_74x164_dt_ids[] = {
        { .compatible = "fairchild,74hc595" },
        { .compatible = "nxp,74lvc594" },
@@ -188,6 +195,7 @@ static struct spi_driver gen_74x164_driver = {
        },
        .probe          = gen_74x164_probe,
        .remove         = gen_74x164_remove,
+       .id_table       = gen_74x164_spi_ids,
 };
 module_spi_driver(gen_74x164_driver);
 
index 0a9d746a0fe0a22fd654e3c90de1ce7e7532d9ac..d26bff29157b5d9b5004dbfc59457d99e2ca5dbc 100644 (file)
@@ -476,10 +476,19 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
 
 static void gpio_mockup_unregister_pdevs(void)
 {
+       struct platform_device *pdev;
+       struct fwnode_handle *fwnode;
        int i;
 
-       for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++)
-               platform_device_unregister(gpio_mockup_pdevs[i]);
+       for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+               pdev = gpio_mockup_pdevs[i];
+               if (!pdev)
+                       continue;
+
+               fwnode = dev_fwnode(&pdev->dev);
+               platform_device_unregister(pdev);
+               fwnode_remove_software_node(fwnode);
+       }
 }
 
 static __init char **gpio_mockup_make_line_names(const char *label,
@@ -508,6 +517,7 @@ static int __init gpio_mockup_register_chip(int idx)
        struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
        struct platform_device_info pdevinfo;
        struct platform_device *pdev;
+       struct fwnode_handle *fwnode;
        char **line_names = NULL;
        char chip_label[32];
        int prop = 0, base;
@@ -536,13 +546,18 @@ static int __init gpio_mockup_register_chip(int idx)
                                        "gpio-line-names", line_names, ngpio);
        }
 
+       fwnode = fwnode_create_software_node(properties, NULL);
+       if (IS_ERR(fwnode))
+               return PTR_ERR(fwnode);
+
        pdevinfo.name = "gpio-mockup";
        pdevinfo.id = idx;
-       pdevinfo.properties = properties;
+       pdevinfo.fwnode = fwnode;
 
        pdev = platform_device_register_full(&pdevinfo);
        kfree_strarray(line_names, ngpio);
        if (IS_ERR(pdev)) {
+               fwnode_remove_software_node(fwnode);
                pr_err("error registering device");
                return PTR_ERR(pdev);
        }
index f5cfc0698799aa87d6b9bc0d13ef3d8b06c4b3b0..d2fe76f3f34fd4930c62bc1b5213422bb04386b9 100644 (file)
@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
        mutex_lock(&chip->i2c_lock);
        ret = regmap_read(chip->regmap, inreg, &reg_val);
        mutex_unlock(&chip->i2c_lock);
-       if (ret < 0) {
-               /*
-                * NOTE:
-                * diagnostic already emitted; that's all we should
-                * do unless gpio_*_value_cansleep() calls become different
-                * from their nonsleeping siblings (and report faults).
-                */
-               return 0;
-       }
+       if (ret < 0)
+               return ret;
 
        return !!(reg_val & bit);
 }
@@ -566,21 +559,21 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
 
        mutex_lock(&chip->i2c_lock);
 
-       /* Disable pull-up/pull-down */
-       ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
-       if (ret)
-               goto exit;
-
        /* Configure pull-up/pull-down */
        if (config == PIN_CONFIG_BIAS_PULL_UP)
                ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
        else if (config == PIN_CONFIG_BIAS_PULL_DOWN)
                ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+       else
+               ret = 0;
        if (ret)
                goto exit;
 
-       /* Enable pull-up/pull-down */
-       ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+       /* Disable/Enable pull-up/pull-down */
+       if (config == PIN_CONFIG_BIAS_DISABLE)
+               ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+       else
+               ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
 
 exit:
        mutex_unlock(&chip->i2c_lock);
@@ -594,7 +587,9 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 
        switch (pinconf_to_config_param(config)) {
        case PIN_CONFIG_BIAS_PULL_UP:
+       case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
        case PIN_CONFIG_BIAS_PULL_DOWN:
+       case PIN_CONFIG_BIAS_DISABLE:
                return pca953x_gpio_set_pull_up_down(chip, offset, config);
        default:
                return -ENOTSUPP;
index 3335bd57761da5954c8aa157c72f4134cb50a21a..ce63cbd14d69a0444b277f638d70c9303ad58523 100644 (file)
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
        struct device_node *pctlnp = of_get_parent(np);
        struct pinctrl_dev *pctldev = NULL;
        struct rockchip_pin_bank *bank = NULL;
+       struct rockchip_pin_output_deferred *cfg;
        static int gpio;
        int id, ret;
 
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       /*
+        * Prevent clashes with a deferred output setting
+        * being added right at this moment.
+        */
+       mutex_lock(&bank->deferred_lock);
+
        ret = rockchip_gpiolib_register(bank);
        if (ret) {
                clk_disable_unprepare(bank->clk);
+               mutex_unlock(&bank->deferred_lock);
                return ret;
        }
 
+       while (!list_empty(&bank->deferred_output)) {
+               cfg = list_first_entry(&bank->deferred_output,
+                                      struct rockchip_pin_output_deferred, head);
+               list_del(&cfg->head);
+
+               ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+               if (ret)
+                       dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+               kfree(cfg);
+       }
+
+       mutex_unlock(&bank->deferred_lock);
+
        platform_set_drvdata(pdev, bank);
        dev_info(dev, "probed %pOF\n", np);
 
index d356e329e6f89e19f7af3c03f4268f0fa934dfc1..269437b01328006936a9c0fded1e6b46a249beac 100644 (file)
@@ -1087,6 +1087,7 @@ struct amdgpu_device {
 
        bool                            no_hw_access;
        struct pci_saved_state          *pci_state;
+       pci_channel_state_t             pci_channel_state;
 
        struct amdgpu_reset_control     *reset_cntl;
 };
index 2d6b2d77b73848e2e43144ca9ce94a5ec8c62a20..054c1a224defb8e6bce04523fd46978aab228108 100644 (file)
@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
 
        dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
        sg_free_table(ttm->sg);
+       kfree(ttm->sg);
        ttm->sg = NULL;
 }
 
index ab3794c42d363d8dde5dd4ea0e60b5d1135ffe56..af9bdf16eefd48761fdd9e1ce24627d0c3e083e4 100644 (file)
@@ -2394,10 +2394,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
-       r = amdgpu_amdkfd_resume_iommu(adev);
-       if (r)
-               goto init_failed;
-
        r = amdgpu_device_ip_hw_init_phase1(adev);
        if (r)
                goto init_failed;
@@ -2436,6 +2432,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (!adev->gmc.xgmi.pending_reset)
                amdgpu_amdkfd_device_init(adev);
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        amdgpu_fru_get_product_info(adev);
 
 init_failed:
@@ -5399,6 +5399,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       adev->pci_channel_state = state;
+
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5541,6 +5543,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 
        DRM_INFO("PCI error: resume callback!!\n");
 
+       /* Only continue execution for the case of pci_channel_io_frozen */
+       if (adev->pci_channel_state != pci_channel_io_frozen)
+               return;
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
index 7a731673191168bee5042115b729812a35607d20..dc50c05f23fc2d6490d045bf8ea444a2ab340748 100644 (file)
@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
        return 0;
 }
 
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+       u64 micro_tile_mode;
+
+       /* Zero swizzle mode means linear */
+       if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+               return 0;
+
+       micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+       switch (micro_tile_mode) {
+       case 0: /* DISPLAY */
+       case 3: /* RENDER */
+               return 0;
+       default:
+               drm_dbg_kms(afb->base.dev,
+                           "Micro tile mode %llu not supported for scanout\n",
+                           micro_tile_mode);
+               return -EINVAL;
+       }
+}
+
 static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
                                 unsigned int *width, unsigned int *height)
 {
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
                                    const struct drm_mode_fb_cmd2 *mode_cmd,
                                    struct drm_gem_object *obj)
 {
+       struct amdgpu_device *adev = drm_to_adev(dev);
        int ret, i;
 
        /*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
        if (ret)
                return ret;
 
+       if (!dev->mode_config.allow_fb_modifiers) {
+               drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+                             "GFX9+ requires FB check based on format modifier\n");
+               ret = check_tiling_flags_gfx6(rfb);
+               if (ret)
+                       return ret;
+       }
+
        if (dev->mode_config.allow_fb_modifiers &&
            !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
                ret = convert_tiling_flags_to_modifier(rfb);
index e7f06bd0f0cd959f1b6f188183e8c15647484d84..1916ec84dd71f8a4bd787d13322165aadc71a023 100644 (file)
@@ -31,6 +31,8 @@
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
 
+#define GFX_OFF_NO_DELAY 0
+
 /*
  * GPU GFX IP block helpers function.
  */
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 {
+       unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
        if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                return;
 
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
                adev->gfx.gfx_off_req_count--;
 
-               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
-                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+               if (adev->gfx.gfx_off_req_count == 0 &&
+                   !adev->gfx.gfx_off_state) {
+                       /* If going to s2idle, no need to wait */
+                       if (adev->in_s0ix)
+                               delay = GFX_OFF_NO_DELAY;
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+                                             delay);
+               }
        } else {
                if (adev->gfx.gfx_off_req_count == 0) {
                        cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
index 603c259b073b4035dacc44f1f9d1647b1d043eb9..025184a556ee6ab539503b6808d4fe261ecd186e 100644 (file)
@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 
        /* set static priority for a queue/ring */
        gfx_v9_0_mqd_set_priority(ring, mqd);
-       mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+       mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
 
        /* map_queues packet doesn't need activate the queue,
         * so only kiq need set this field.
index 41c3a0d70b7c0b309af25d8495c07c0c94edbad8..e47104a1f55967958ad4982af315b955f9865d0e 100644 (file)
@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v10_0_gart_disable(adev);
+
        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-       gmc_v10_0_gart_disable(adev);
 
        return 0;
 }
index d90c16a6b2b8085d701249d0a0eaf024a8ba21cb..5551359d5dfdc77f55e9c868a47e550032926674 100644 (file)
@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v9_0_gart_disable(adev);
+
        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-       gmc_v9_0_gart_disable(adev);
 
        return 0;
 }
index 779f5c911e1123188d075f4973877a5aec2d2bc1..e32efcfb0c8b14196165860d8ada47211b4b33a8 100644 (file)
@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
                        msleep(1000);
        }
 
+       /* TODO: check whether can submit a doorbell request to raise
+        * a doorbell fence to exit gfxoff.
+        */
+       if (adev->in_s0ix)
+               amdgpu_gfx_off_ctrl(adev, false);
+
        sdma_v5_2_soft_reset(adev);
        /* unhalt the MEs */
        sdma_v5_2_enable(adev, true);
@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v5_2_gfx_resume(adev);
+       if (adev->in_s0ix)
+               amdgpu_gfx_off_ctrl(adev, true);
        if (r)
                return r;
        r = sdma_v5_2_rlc_resume(adev);
index c2a4d920da40e485a425d36c3a3250d68959fb80..4a416231b24c8b42a1269227c5e7789702f9016e 100644 (file)
@@ -1085,18 +1085,12 @@ static int kfd_resume(struct kfd_dev *kfd)
        int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               goto dqm_start_error;
-       }
 
        return err;
-
-dqm_start_error:
-       kfd_iommu_suspend(kfd);
-       return err;
 }
 
 static inline void kfd_queue_work(struct workqueue_struct *wq,
index 7dffc04a557ea12fd01f3f5454ab51edcf3738b6..127667e549c199730cef24109417f39b960a44a8 100644 (file)
@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
 
 config DRM_AMD_DC_SI
        bool "AMD DC support for Southern Islands ASICs"
+       depends on DRM_AMDGPU_SI
+       depends on DRM_AMD_DC
        default n
        help
          Choose this option to enable new AMD DC support for SI asics
index 66c799f5c7cf7e43acc3af2a38409241e029c3fb..1ea31dcc7a8b0af245272277c535a7a701edf0f8 100644 (file)
@@ -1115,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+       init_data.asic_id.chip_id = adev->pdev->device;
 
        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1719,6 +1720,7 @@ static int dm_late_init(void *handle)
                linear_lut[i] = 0xFFFF * i / 15;
 
        params.set = 0;
+       params.backlight_ramping_override = false;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
index f6dbc5a747576d68b82d20c832e7717fc846ad58..6d655e158267a837d2085baa0898a137611a35e2 100644 (file)
@@ -1306,12 +1306,6 @@ static void override_training_settings(
 {
        uint32_t lane;
 
-       /* Override link settings */
-       if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
-               lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
-       if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
-               lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
        /* Override link spread */
        if (!link->dp_ss_off && overrides->downspread != NULL)
                lt_settings->link_settings.link_spread = *overrides->downspread ?
@@ -1826,14 +1820,13 @@ bool perform_link_training_with_retries(
                if (panel_mode == DP_PANEL_MODE_EDP) {
                        struct cp_psp *cp_psp = &stream->ctx->cp_psp;
 
-                       if (cp_psp && cp_psp->funcs.enable_assr) {
-                               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-                                       /* since eDP implies ASSR on, change panel
-                                        * mode to disable ASSR
-                                        */
-                                       panel_mode = DP_PANEL_MODE_DEFAULT;
-                               }
-                       }
+                       if (cp_psp && cp_psp->funcs.enable_assr)
+                               /* ASSR is bound to fail with unsigned PSP
+                                * verstage used during devlopment phase.
+                                * Report and continue with eDP panel mode to
+                                * perform eDP link training with right settings
+                                */
+                               cp_psp->funcs.enable_assr(cp_psp->handle, link);
                }
 #endif
 
index d8b22618b79e8bfce533f2fe523e80f5f7b227ec..c337588231ff0a485ded6951fed6862653b073d6 100644 (file)
@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
        uint32_t RDPCSTX_PHY_CNTL4;
        uint32_t RDPCSTX_PHY_CNTL5;
        uint32_t RDPCSTX_PHY_CNTL6;
+       uint32_t RDPCSPIPE_PHY_CNTL6;
        uint32_t RDPCSTX_PHY_CNTL7;
        uint32_t RDPCSTX_PHY_CNTL8;
        uint32_t RDPCSTX_PHY_CNTL9;
index 90127c1f9e35da969d0e2897d3c46bfa18171165..b0892443fbd57d9cd76de5d7ddd3a56a060d007c 100644 (file)
@@ -37,6 +37,7 @@
 
 #include "link_enc_cfg.h"
 #include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
 
 #define CTX \
        enc10->base.ctx
 #define AUX_REG_WRITE(reg_name, val) \
                        dm_write_reg(CTX, AUX_REG(reg_name), val)
 
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
 void dcn31_link_encoder_set_dio_phy_mux(
        struct link_encoder *enc,
        enum encoder_type_select sel,
@@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = {
        .fec_is_active = enc2_fec_is_active,
        .get_dig_frontend = dcn10_get_dig_frontend,
        .get_dig_mode = dcn10_get_dig_mode,
-       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
-       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+       .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
        .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
 };
 
@@ -404,3 +409,60 @@ void dcn31_link_encoder_disable_output(
        }
 }
 
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t dp_alt_mode_disable;
+       bool is_usb_c_alt_mode = false;
+
+       if (enc->features.flags.bits.DP_IS_USB_C) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+               } else {
+               /*
+                * B0 phys use a new set of registers to check whether alt mode is disabled.
+                * if value == 1 alt mode is disabled, otherwise it is enabled.
+                */
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       } else {
+                       // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       }
+               }
+
+               is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+       }
+
+       return is_usb_c_alt_mode;
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+                                                                                struct dc_link_settings *link_settings)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t is_in_usb_c_dp4_mode = 0;
+
+       dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+       /* in usb c dp2 mode, max lane count is 2 */
+       if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+               } else {
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       } else {
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       }
+               }
+               if (!is_in_usb_c_dp4_mode)
+                       link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+       }
+}
index 32d146312838bb2e40d49cd256aba24cbff0e1c6..3454f1e7c1f176db3f00482198bbce5e25a5117c 100644 (file)
@@ -69,6 +69,7 @@
        SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+       SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
        SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
-       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
@@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output(
        struct link_encoder *enc,
        enum signal_type signal);
 
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+       struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 #endif /* __DC_LINK_ENCODER__DCN31_H__ */
index a7702d3c75cdd8d0856160d4984ab39194426815..0006bbac466c8092ce554322ceab9758471f487a 100644 (file)
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
        .performance_trace = false,
-       .max_downscale_src_width = 7680,/*upto 8K*/
+       .max_downscale_src_width = 3840,/*upto 4K*/
        .disable_pplib_wm_range = false,
        .scl_reset_length10 = true,
        .sanity_checks = false,
@@ -1284,6 +1284,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
        if (!enc1 || !vpg || !afmt)
                return NULL;
 
+       if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+                       ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+               if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+                       eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+       }
+
        dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
                                        eng_id, vpg, afmt,
                                        &stream_enc_regs[eng_id],
index 381c17caace189fa06b0c416d86ab74ae9b64bd8..5adc471bef57f8a89dc76a390f5b6ea2e2f76cfd 100644 (file)
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP                     146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02            // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x1A
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
index 92caf8441d1e08506a78317651711c60a3e2aaf6..01a56556cde13004567c3dcb4e4fe2fa7ed355f4 100644 (file)
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2                                                0xe0c7
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2                                                 0xe0c8
 
+//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
 
 #endif
index 6bfaefa018182e4fef3aedd7b8d62a503da5c7a6..1e30eaeb0e1b3a75d33eea41ad508d686284c4d1 100644 (file)
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
        return flags;
 }
 
-static enum drm_connector_status ast_connector_detect(struct drm_connector
-                                                  *connector, bool force)
-{
-       int r;
-
-       r = ast_get_modes(connector);
-       if (r <= 0)
-               return connector_status_disconnected;
-
-       return connector_status_connected;
-}
-
 static void ast_connector_destroy(struct drm_connector *connector)
 {
        struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
 
 static const struct drm_connector_funcs ast_connector_funcs = {
        .reset = drm_atomic_helper_connector_reset,
-       .detect = ast_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = ast_connector_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-                                               DRM_CONNECTOR_POLL_DISCONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        drm_connector_attach_encoder(connector, encoder);
 
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
 
        drm_mode_config_reset(dev);
 
-       drm_kms_helper_poll_init(dev);
-
        return 0;
 }
 
index 6325877c5fd6e642169f350fa17d08593aaa95a0..ea9a79bc958391f6714249b438c96d65b94f080c 100644 (file)
@@ -1834,11 +1834,20 @@ static void connector_bad_edid(struct drm_connector *connector,
                               u8 *edid, int num_blocks)
 {
        int i;
-       u8 num_of_ext = edid[0x7e];
+       u8 last_block;
+
+       /*
+        * 0x7e in the EDID is the number of extension blocks. The EDID
+        * is 1 (base block) + num_ext_blocks big. That means we can think
+        * of 0x7e in the EDID of the _index_ of the last block in the
+        * combined chunk of memory.
+        */
+       last_block = edid[0x7e];
 
        /* Calculate real checksum for the last edid extension block data */
-       connector->real_edid_checksum =
-               drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
+       if (last_block < num_blocks)
+               connector->real_edid_checksum =
+                       drm_edid_block_checksum(edid + last_block * EDID_LENGTH);
 
        if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
                return;
index 3ab07832104587d97b774381b9110a570b7d714c..8e7a124d6c5a3ae27f18c268af7d02e4fa952b77 100644 (file)
@@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 {
        struct drm_client_dev *client = &fb_helper->client;
        struct drm_device *dev = fb_helper->dev;
+       struct drm_mode_config *config = &dev->mode_config;
        int ret = 0;
        int crtc_count = 0;
        struct drm_connector_list_iter conn_iter;
@@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        /* Handle our overallocation */
        sizes.surface_height *= drm_fbdev_overalloc;
        sizes.surface_height /= 100;
+       if (sizes.surface_height > config->max_height) {
+               drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
+                           config->max_height);
+               sizes.surface_height = config->max_height;
+       }
 
        /* push down into drivers */
        ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
index 9870c4e6af36cdaaf37253ab1d69952a43001dd6..b5001db7a95c6eb85a8f9bffd12267ea5034866f 100644 (file)
@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct decon_context *ctx;
-       struct resource *res;
        int ret;
        int i;
 
@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
                ctx->clks[i] = clk;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->addr = devm_ioremap_resource(dev, res);
+       ctx->addr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->addr))
                return PTR_ERR(ctx->addr);
 
index e39fac889edc339960ee280a81635c0fb6a21615..8d137857818cab55f2304bd7a18a63171f251b07 100644 (file)
@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = {
 static int exynos_dsi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct exynos_dsi *dsi;
        int ret, i;
 
@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
                }
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dsi->reg_base = devm_ioremap_resource(dev, res);
+       dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dsi->reg_base))
                return PTR_ERR(dsi->reg_base);
 
index a3c718148c451122f6b622f81941d2f416dd92b0..ecfd82d0afb7e29552f19a496e7327cc1ab2d67f 100644 (file)
@@ -85,7 +85,6 @@ struct fimc_scaler {
 /*
  * A structure of fimc context.
  *
- * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @lock: locking of operations.
  * @clocks: fimc clocks.
@@ -103,7 +102,6 @@ struct fimc_context {
        struct exynos_drm_ipp_formats   *formats;
        unsigned int                    num_formats;
 
-       struct resource *regs_res;
        void __iomem    *regs;
        spinlock_t      lock;
        struct clk      *clocks[FIMC_CLKS_MAX];
@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev)
        ctx->num_formats = num_formats;
 
        /* resource memory */
-       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index 700ca4fa6665c577f48623916cfc3c66418cda41..c735e53939d88c86799afb873afbb75f4dd54ea9 100644 (file)
@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->lcd_clk);
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       ctx->regs = devm_ioremap_resource(dev, res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index b00230626c6a008fcd46d9d6ae92861ac193c307..471fd6c8135f2c996acb77c7b43d1fbdba0b6570 100644 (file)
@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = {
 static int g2d_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct g2d_data *g2d;
        int ret;
 
@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev)
        clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
        clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       g2d->regs = devm_ioremap_resource(dev, res);
+       g2d->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(g2d->regs)) {
                ret = PTR_ERR(g2d->regs);
                goto err_put_clk;
index 90d7bf9068852cfad01b945047783cdf83045128..166a802628963226b6678d5668660a0761c3e29d 100644 (file)
@@ -86,7 +86,6 @@ struct gsc_scaler {
 /*
  * A structure of gsc context.
  *
- * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @gsc_clk: gsc gate clock.
  * @sc: scaler infomations.
@@ -103,7 +102,6 @@ struct gsc_context {
        struct exynos_drm_ipp_formats   *formats;
        unsigned int                    num_formats;
 
-       struct resource *regs_res;
        void __iomem    *regs;
        const char      **clk_names;
        struct clk      *clocks[GSC_MAX_CLOCKS];
@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev)
                }
        }
 
-       /* resource memory */
-       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index ee61be4cf15216f0c5044a1e93e2e4d92965822b..dec7df35baa9dd063a5fb34f02bea41d1f05e059 100644 (file)
@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = {
 static int rotator_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *regs_res;
        struct rot_context *rot;
        const struct rot_variant *variant;
        int irq;
@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev)
        rot->formats = variant->formats;
        rot->num_formats = variant->num_formats;
        rot->dev = dev;
-       regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       rot->regs = devm_ioremap_resource(dev, regs_res);
+       rot->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(rot->regs))
                return PTR_ERR(rot->regs);
 
index f9ae5b038d59301a24c456aaeb1e91803ad3260a..3a7851b7dc668fefbe91b1cd6ed577245a779e55 100644 (file)
@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = {
 static int scaler_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *regs_res;
        struct scaler_context *scaler;
        int irq;
        int ret, i;
@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev)
                (struct scaler_data *)of_device_get_match_data(dev);
 
        scaler->dev = dev;
-       regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       scaler->regs = devm_ioremap_resource(dev, regs_res);
+       scaler->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(scaler->regs))
                return PTR_ERR(scaler->regs);
 
index c769dec576de54f13c8a7f37127579fc5b990e8c..7655142a4651cadc40d25e5fb6f719aa28dec516 100644 (file)
@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev)
        struct hdmi_audio_infoframe *audio_infoframe;
        struct device *dev = &pdev->dev;
        struct hdmi_context *hdata;
-       struct resource *res;
        int ret;
 
        hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev)
                return ret;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hdata->regs = devm_ioremap_resource(dev, res);
+       hdata->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdata->regs)) {
                ret = PTR_ERR(hdata->regs);
                return ret;
index 886add4f9cd0fde76b784d8c6e04903819fdd757..d2d8582b36df9aebac0ebbd6e4ac5f30ddf1bef8 100644 (file)
@@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv);
 int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
 int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
                            u32 w, u32 h, u32 pitch);
+int hyperv_hide_hw_ptr(struct hv_device *hdev);
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
 int hyperv_connect_vsp(struct hv_device *hdev);
 
index 6dd4717d3e1eacd74be7e66e574052a02388ee69..8c97a20dfe2310505110a4e125b3983490b41385 100644 (file)
@@ -101,6 +101,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
        struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
        struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
 
+       hyperv_hide_hw_ptr(hv->hdev);
        hyperv_update_situation(hv->hdev, 1,  hv->screen_depth,
                                crtc_state->mode.hdisplay,
                                crtc_state->mode.vdisplay,
index 6d4bdccfbd1adff363d04622d065187453173509..c0155c6271bf8935f9b1c967a9d2fd739fc4f074 100644 (file)
@@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
        return 0;
 }
 
+/*
+ * Hyper-V supports a hardware cursor feature. It's not used by Linux VM,
+ * but the Hyper-V host still draws a point as an extra mouse pointer,
+ * which is unwanted, especially when Xorg is running.
+ *
+ * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
+ * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
+ * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
+ * work in tests.
+ *
+ * Copy synthvid_send_ptr() to hyperv_drm and rename it to
+ * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
+ * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
+ * draws an extra unwanted mouse pointer after the VM Connection window is
+ * closed and reopened.
+ */
+int hyperv_hide_hw_ptr(struct hv_device *hdev)
+{
+       struct synthvid_msg msg;
+
+       memset(&msg, 0, sizeof(struct synthvid_msg));
+       msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
+       msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+               sizeof(struct synthvid_pointer_position);
+       msg.ptr_pos.is_visible = 1;
+       msg.ptr_pos.video_output = 0;
+       msg.ptr_pos.image_x = 0;
+       msg.ptr_pos.image_y = 0;
+       hyperv_sendpacket(hdev, &msg);
+
+       memset(&msg, 0, sizeof(struct synthvid_msg));
+       msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
+       msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+               sizeof(struct synthvid_pointer_shape);
+       msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
+       msg.ptr_shape.is_argb = 1;
+       msg.ptr_shape.width = 1;
+       msg.ptr_shape.height = 1;
+       msg.ptr_shape.hot_x = 0;
+       msg.ptr_shape.hot_y = 0;
+       msg.ptr_shape.data[0] = 0;
+       msg.ptr_shape.data[1] = 1;
+       msg.ptr_shape.data[2] = 1;
+       msg.ptr_shape.data[3] = 1;
+       hyperv_sendpacket(hdev, &msg);
+
+       return 0;
+}
+
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
 {
        struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
@@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev)
                return;
        }
 
-       if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+       if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
                hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+               if (hv->dirt_needed)
+                       hyperv_hide_hw_ptr(hv->hdev);
+       }
 }
 
 static void hyperv_receive(void *ctx)
index 43ec7fcd3f5d284c08d275640d110b69ae64a159..a3eae3f3eadcee308442f9cf2be36dacdfeeb0b5 100644 (file)
@@ -1577,8 +1577,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *intel_crtc;
+       enum pipe pipe;
+
+       if (!crtc_state)
+               return;
+
+       intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       pipe = intel_crtc->pipe;
 
        /* wa verify 1409054076:icl,jsl,ehl */
        if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
index 7cfe91fc05f2470a21133cb2f7d7d3a1bc28e1c6..68abeaf2d7d4d818c3f6c5ddd3374838469b8bd5 100644 (file)
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
 {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        acpi_handle dhandle;
+       union acpi_object *obj;
 
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return;
 
-       acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
-                         INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+       obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+                               INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+       if (obj)
+               ACPI_FREE(obj);
 }
 
 /*
index 532237588511822c336c998a7181d794ff787b70..4e0f96bf61585bba4a9dcb375326f3b1505294ee 100644 (file)
@@ -1308,8 +1308,9 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
                else
                        aud_freq = aud_freq_init;
 
-               /* use BIOS provided value for TGL unless it is a known bad value */
-               if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+               /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+               if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+                   aud_freq_init != AUD_FREQ_TGL_BROKEN)
                        aud_freq = aud_freq_init;
 
                drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
index e86e6ed2d3bf20613e45f2e86882fd511d850863..fd71346aac7bcb77d8688a0cb0e5e63e4255298b 100644 (file)
@@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915,
        }
 
        i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
-       if (bdb->version >= 191 &&
-           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
-               const struct lfp_backlight_control_method *method;
+       if (bdb->version >= 191) {
+               size_t exp_size;
 
-               method = &backlight_data->backlight_control[panel_type];
-               i915->vbt.backlight.type = method->type;
-               i915->vbt.backlight.controller = method->controller;
+               if (bdb->version >= 236)
+                       exp_size = sizeof(struct bdb_lfp_backlight_data);
+               else if (bdb->version >= 234)
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+               else
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+               if (get_blocksize(backlight_data) >= exp_size) {
+                       const struct lfp_backlight_control_method *method;
+
+                       method = &backlight_data->backlight_control[panel_type];
+                       i915->vbt.backlight.type = method->type;
+                       i915->vbt.backlight.controller = method->controller;
+               }
        }
 
        i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
index 9903a78df896fd141d035810f75351d5c0c0ade7..bd184325d0c75832b77130fb0c70c1c84bedaf94 100644 (file)
@@ -3807,7 +3807,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
 static void intel_ddi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
-       if (intel_crtc_has_dp_encoder(crtc_state))
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+       if (intel_phy_is_tc(i915, phy))
+               intel_tc_port_sanitize(enc_to_dig_port(encoder));
+
+       if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
                intel_dp_sync_state(encoder, crtc_state);
 }
 
index 134a6acbd8fbe396584e2c7590c2c8796da1aaf2..17f44ffea5866f5ff22f4fa84d7c4aef730f3658 100644 (file)
@@ -13082,18 +13082,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        readout_plane_state(dev_priv);
 
        for_each_intel_encoder(dev, encoder) {
+               struct intel_crtc_state *crtc_state = NULL;
+
                pipe = 0;
 
                if (encoder->get_hw_state(encoder, &pipe)) {
-                       struct intel_crtc_state *crtc_state;
-
                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);
 
                        encoder->base.crtc = &crtc->base;
                        intel_encoder_get_config(encoder, crtc_state);
-                       if (encoder->sync_state)
-                               encoder->sync_state(encoder, crtc_state);
 
                        /* read out to slave crtc as well for bigjoiner */
                        if (crtc_state->bigjoiner) {
@@ -13108,6 +13106,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                        encoder->base.crtc = NULL;
                }
 
+               if (encoder->sync_state)
+                       encoder->sync_state(encoder, crtc_state);
+
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                            encoder->base.base.id, encoder->base.name,
@@ -13390,17 +13391,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        intel_modeset_readout_hw_state(dev);
 
        /* HW state is read out, now we need to sanitize this mess. */
-
-       /* Sanitize the TypeC port mode upfront, encoders depend on this */
-       for_each_intel_encoder(dev, encoder) {
-               enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-               /* We need to sanitize only the MST primary port. */
-               if (encoder->type != INTEL_OUTPUT_DP_MST &&
-                   intel_phy_is_tc(dev_priv, phy))
-                       intel_tc_port_sanitize(enc_to_dig_port(encoder));
-       }
-
        get_encoder_power_domains(dev_priv);
 
        if (HAS_PCH_IBX(dev_priv))
index 330077c2e5883df6810c5ceca805e2f9dad443da..a2108a8f544d8040db7186a23f043123061d25d4 100644 (file)
@@ -814,6 +814,11 @@ struct lfp_brightness_level {
        u16 reserved;
 } __packed;
 
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
 struct bdb_lfp_backlight_data {
        u8 entry_size;
        struct lfp_backlight_data_entry data[16];
index 9ccf4b29b82e107dddec3019ac9243b88d105528..166bb46408a9bd655c044db8895e648cff79f765 100644 (file)
@@ -937,6 +937,10 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
        unsigned int n;
 
        e = alloc_engines(num_engines);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+       e->num_engines = num_engines;
+
        for (n = 0; n < num_engines; n++) {
                struct intel_context *ce;
                int ret;
@@ -970,7 +974,6 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
                        goto free_engines;
                }
        }
-       e->num_engines = num_engines;
 
        return e;
 
index e382b7f2353b89d775a269ac544751e2ce081341..5ab136ffdeb2dd4fa06cdcb052d9e77fd7398eb7 100644 (file)
@@ -118,7 +118,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
-       int err;
+       int err = 0;
 
        /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
        bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -242,12 +242,15 @@ skip:
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                if (err)
-                       return err;
+                       break;
        }
 
        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
+       if (err)
+               return err;
+
        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
index 745e84c72c908f8a741f32e5f01e28f6c41da205..17ca4dc4d0cb3a03169fc0efc3934a0851631a7e 100644 (file)
@@ -362,8 +362,9 @@ static int __intel_context_active(struct i915_active *active)
        return 0;
 }
 
-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
-                                enum i915_sw_fence_notify state)
+static int __i915_sw_fence_call
+sw_fence_dummy_notify(struct i915_sw_fence *sf,
+                     enum i915_sw_fence_notify state)
 {
        return NOTIFY_DONE;
 }
@@ -420,6 +421,7 @@ void intel_context_fini(struct intel_context *ce)
 
        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
+       i915_sw_fence_fini(&ce->guc_blocked);
 }
 
 void i915_context_module_exit(void)
index 591a5224287ea83475df1184855e8a34bc6478f3..0a03fbed9f9b7117a86620abde12597716a2adc2 100644 (file)
@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps)
        if (!intel_rps_is_enabled(rps))
                return;
 
-       GEM_BUG_ON(atomic_read(&rps->num_waiters));
-
        if (!intel_rps_clear_active(rps))
                return;
 
index 99e1fad5ca206cd1308b641bf659513604fb3628..c9086a600bce57c485d2d6fd87c7c8a44278937f 100644 (file)
@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
  *  |   +-------+--------------------------------------------------------------+
  *  |   |   7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message  |
  *  +---+-------+--------------------------------------------------------------+
- *  | 1 |  31:0 |  +--------------------------------------------------------+  |
- *  +---+-------+  |                                                        |  |
- *  |...|       |  |  Embedded `HXG Message`_                               |  |
- *  +---+-------+  |                                                        |  |
- *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | [Embedded `HXG Message`_]                                    |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
  *  +---+-------+--------------------------------------------------------------+
  */
 
index bbf1ddb7743427e857f1b95f79a0cd044e80a7bd..9baa3cb07d1372b622b9d09452893758c0f2f48f 100644 (file)
  *  +---+-------+--------------------------------------------------------------+
  *  |   | Bits  | Description                                                  |
  *  +===+=======+==============================================================+
- *  | 0 |  31:0 |  +--------------------------------------------------------+  |
- *  +---+-------+  |                                                        |  |
- *  |...|       |  |  Embedded `HXG Message`_                               |  |
- *  +---+-------+  |                                                        |  |
- *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  | 0 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | [Embedded `HXG Message`_]                                    |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
  *  +---+-------+--------------------------------------------------------------+
  */
 
index b56a8e37a3cd6acc794f8e36bfb89337153b101a..1bb1be5c48c847e1ef0c9a681400c5c664b8e781 100644 (file)
@@ -576,7 +576,7 @@ retry:
 
                        /* No one is going to touch shadow bb from now on. */
                        i915_gem_object_flush_map(bb->obj);
-                       i915_gem_object_unlock(bb->obj);
+                       i915_gem_ww_ctx_fini(&ww);
                }
        }
        return 0;
@@ -630,7 +630,7 @@ retry:
                return ret;
        }
 
-       i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
+       i915_gem_ww_ctx_fini(&ww);
 
        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
index 664970f2bc62a76cf78956aa990b4d0186396fe7..4037030f0984740911ba5c683785f46f8afaa1b3 100644 (file)
@@ -8193,6 +8193,11 @@ enum {
 #define  HSW_SPR_STRETCH_MAX_X1                REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
 #define  HSW_FBCQ_DIS                  (1 << 22)
 #define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
+#define  SKL_PLANE1_STRETCH_MAX_MASK   REG_GENMASK(1, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X8     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X4     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define  SKL_PLANE1_STRETCH_MAX_X2     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define  SKL_PLANE1_STRETCH_MAX_X1     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define _CHICKEN_TRANS_A       0x420c0
index ce446716d0929f75632415901122d435bbe270e6..79da5eca60af54b97a7ecea3d18b8dfa2a34377a 100644 (file)
@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg)
        i915_sw_fence_init(&rq->submit, submit_notify);
        i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 
-       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
        rq->capture_list = NULL;
 
        init_llist_head(&rq->execute_cb);
@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;
 
-       kref_init(&rq->fence.refcount);
-       rq->fence.flags = 0;
-       rq->fence.error = 0;
-       INIT_LIST_HEAD(&rq->fence.cb_list);
-
        ret = intel_timeline_get_seqno(tl, rq, &seqno);
        if (ret)
                goto err_free;
 
-       rq->fence.context = tl->fence_context;
-       rq->fence.seqno = seqno;
+       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+                      tl->fence_context, seqno);
 
        RCU_INIT_POINTER(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;
index 65bc3709f54c5aa4b1a9b9b981c7da6d62287df2..a725792d5248bb49799737d69ea54b99ea45dc10 100644 (file)
@@ -76,6 +76,8 @@ struct intel_wm_config {
 
 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       enum pipe pipe;
+
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
@@ -89,6 +91,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
                           SKL_DE_COMPRESSED_HASH_MODE);
        }
 
+       for_each_pipe(dev_priv, pipe) {
+               /*
+                * "Plane N strech max must be programmed to 11b (x1)
+                *  when Async flips are enabled on that plane."
+                */
+               if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+                       intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+                                        SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
+       }
+
        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
        intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
                   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
index 44327bc629ca0fc794d5947fa4f899ef95b46a4e..06613ffeaaf85eb401a45cb70a6e7dd313832ba3 100644 (file)
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
        .disable_vblank = kmb_crtc_disable_vblank,
 };
 
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+                             struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
        unsigned int val = 0;
 
        /* Initialize mipi */
-       kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+       kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
        drm_info(dev,
                 "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
                 m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
        struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
 
        clk_prepare_enable(kmb->kmb_clk.clk_lcd);
-       kmb_crtc_set_mode(crtc);
+       kmb_crtc_set_mode(crtc, state);
        drm_crtc_vblank_on(crtc);
 }
 
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
        spin_unlock_irq(&crtc->dev->event_lock);
 }
 
+static enum drm_mode_status
+               kmb_crtc_mode_valid(struct drm_crtc *crtc,
+                                   const struct drm_display_mode *mode)
+{
+       int refresh;
+       struct drm_device *dev = crtc->dev;
+       int vfp = mode->vsync_start - mode->vdisplay;
+
+       if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+               drm_dbg(dev, "height = %d less than %d",
+                       mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+               return MODE_BAD_VVALUE;
+       }
+       if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+               drm_dbg(dev, "width = %d less than %d",
+                       mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+               return MODE_BAD_HVALUE;
+       }
+       refresh = drm_mode_vrefresh(mode);
+       if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+               drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+                       refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+               return MODE_BAD;
+       }
+
+       if (vfp < KMB_CRTC_MIN_VFP) {
+               drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+               return MODE_BAD;
+       }
+
+       return MODE_OK;
+}
+
 static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
        .atomic_begin = kmb_crtc_atomic_begin,
        .atomic_enable = kmb_crtc_atomic_enable,
        .atomic_disable = kmb_crtc_atomic_disable,
        .atomic_flush = kmb_crtc_atomic_flush,
+       .mode_valid = kmb_crtc_mode_valid,
 };
 
 int kmb_setup_crtc(struct drm_device *drm)
index 1c2f4799f421ddcd520917dd95d2aa2d6830c819..961ac6fb5fcf746e658e6f3fc4a839fe6b15f4e6 100644 (file)
@@ -172,10 +172,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
        ret = drmm_mode_config_init(drm);
        if (ret)
                return ret;
-       drm->mode_config.min_width = KMB_MIN_WIDTH;
-       drm->mode_config.min_height = KMB_MIN_HEIGHT;
-       drm->mode_config.max_width = KMB_MAX_WIDTH;
-       drm->mode_config.max_height = KMB_MAX_HEIGHT;
+       drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
+       drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
+       drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
+       drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
        drm->mode_config.funcs = &kmb_mode_config_funcs;
 
        ret = kmb_setup_crtc(drm);
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
                if (val & LAYER3_DMA_FIFO_UNDERFLOW)
                        drm_dbg(&kmb->drm,
                                "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
-               if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+               if (val & LAYER3_DMA_FIFO_OVERFLOW)
                        drm_dbg(&kmb->drm,
                                "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
        }
index ebbaa5f422d591ea285544898eec1a84ccf38de0..bf085e95b28f45170ff8b7957042f00332bb5197 100644 (file)
 #define DRIVER_MAJOR                   1
 #define DRIVER_MINOR                   1
 
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP               4
+#define KMB_CRTC_MAX_WIDTH             1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT            1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH             1920
+#define KMB_CRTC_MIN_HEIGHT            1080
+#define KMB_FB_MAX_WIDTH               1920
+#define KMB_FB_MAX_HEIGHT              1080
+#define KMB_FB_MIN_WIDTH               1
+#define KMB_FB_MIN_HEIGHT              1
+#define KMB_MIN_VREFRESH               59    /*vertical refresh in Hz */
+#define KMB_MAX_VREFRESH               60    /*vertical refresh in Hz */
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
@@ -45,6 +57,7 @@ struct kmb_drm_private {
        spinlock_t                      irq_lock;
        int                             irq_lcd;
        int                             sys_clk_mhz;
+       struct disp_cfg                 init_disp_cfg[KMB_MAX_PLANES];
        struct layer_status             plane_status[KMB_MAX_PLANES];
        int                             kmb_under_flow;
        int                             kmb_flush_done;
index 1793cd31b1178b457143a6386cfc4dfff85379f8..f6071882054c7d1358e996c595f2aa7940f06ead 100644 (file)
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
        return 0;
 }
 
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500  500
+
 static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
                                struct mipi_tx_frame_timing_cfg *fg_cfg)
 {
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
        /* 500 Mhz system clock minus 50 to account for the difference in
         * MIPI clock speed in RTL tests
         */
-       sysclk = kmb_dsi->sys_clk_mhz - 50;
+       if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+               sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+       } else {
+               /* 700 Mhz clk*/
+               sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+       }
 
        /* PPL-Pixel Packing Layer, LLP-Low Level Protocol
         * Frame genartor timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
        return 0;
 }
 
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+                               struct drm_atomic_state *old_state)
 {
        struct regmap *msscam;
 
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
                dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
                return;
        }
-
+       drm_atomic_bridge_chain_enable(adv_bridge, old_state);
        /* DISABLE MIPI->CIF CONNECTION */
        regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
 
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 }
 
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-                    int sys_clk_mhz)
+                    int sys_clk_mhz, struct drm_atomic_state *old_state)
 {
        u64 data_rate;
 
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
                mipi_tx_init_cfg.lane_rate_mbps = data_rate;
        }
 
-       kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
        /* Initialize mipi controller */
        mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
 
        /* Dphy initialization */
        mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
 
-       connect_lcd_to_mipi(kmb_dsi);
+       connect_lcd_to_mipi(kmb_dsi, old_state);
        dev_info(kmb_dsi->dev, "mipi hw initialized");
 
        return 0;
index 66b7c500d9bcf316be8a5e867e101ae763b98b7b..09dc88743d7799f361537c7ad57b55e26b2a91bd 100644 (file)
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
 struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
 void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-                    int sys_clk_mhz);
+                    int sys_clk_mhz, struct drm_atomic_state *old_state);
 int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
index ecee6782612d87342d2800265633d114572fd72b..00404ba4126ddcb787fdbc939755578c23875674 100644 (file)
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
 
 static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
 {
+       struct kmb_drm_private *kmb;
+       struct kmb_plane *kmb_plane = to_kmb_plane(plane);
        int i;
+       int plane_id = kmb_plane->id;
+       struct disp_cfg init_disp_cfg;
 
+       kmb = to_kmb(plane->dev);
+       init_disp_cfg = kmb->init_disp_cfg[plane_id];
+       /* Due to HW limitations, changing pixel format after initial
+        * plane configuration is not supported.
+        */
+       if (init_disp_cfg.format && init_disp_cfg.format != format) {
+               drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+               return -EINVAL;
+       }
        for (i = 0; i < plane->format_count; i++) {
                if (plane->format_types[i] == format)
                        return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 {
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
+       struct kmb_drm_private *kmb;
+       struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+       int plane_id = kmb_plane->id;
+       struct disp_cfg init_disp_cfg;
        struct drm_framebuffer *fb;
        int ret;
        struct drm_crtc_state *crtc_state;
        bool can_position;
 
+       kmb = to_kmb(plane->dev);
+       init_disp_cfg = kmb->init_disp_cfg[plane_id];
        fb = new_plane_state->fb;
        if (!fb || !new_plane_state->crtc)
                return 0;
@@ -94,10 +113,21 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
        if (ret)
                return ret;
 
-       if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
+       if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
+           new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
+           new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
+           new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
                return -EINVAL;
-       if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
+
+       /* Due to HW limitations, changing plane height or width after
+        * initial plane configuration is not supported.
+        */
+       if ((init_disp_cfg.width && init_disp_cfg.height) &&
+           (init_disp_cfg.width != fb->width ||
+           init_disp_cfg.height != fb->height)) {
+               drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
                return -EINVAL;
+       }
        can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
        crtc_state =
                drm_atomic_get_existing_crtc_state(state,
@@ -277,6 +307,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
        kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
 }
 
+static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
+                               const struct drm_plane_state *state,
+                               unsigned char plane_id,
+                               unsigned int *val)
+{
+       u16 plane_alpha = state->alpha;
+       u16 pixel_blend_mode = state->pixel_blend_mode;
+       int has_alpha = state->fb->format->has_alpha;
+
+       if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
+               *val |= LCD_LAYER_ALPHA_STATIC;
+
+       if (has_alpha) {
+               switch (pixel_blend_mode) {
+               case DRM_MODE_BLEND_PIXEL_NONE:
+                       break;
+               case DRM_MODE_BLEND_PREMULTI:
+                       *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
+                       break;
+               case DRM_MODE_BLEND_COVERAGE:
+                       *val |= LCD_LAYER_ALPHA_EMBED;
+                       break;
+               default:
+                       DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
+                                 __stringify(pixel_blend_mode),
+                                 (long)pixel_blend_mode);
+                       break;
+               }
+       }
+
+       if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
+               *val &= LCD_LAYER_ALPHA_DISABLED;
+               return;
+       }
+
+       kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
+}
+
 static void kmb_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_atomic_state *state)
 {
@@ -296,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        unsigned char plane_id;
        int num_planes;
        static dma_addr_t addr[MAX_SUB_PLANES];
+       struct disp_cfg *init_disp_cfg;
 
        if (!plane || !new_plane_state || !old_plane_state)
                return;
@@ -303,11 +372,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        fb = new_plane_state->fb;
        if (!fb)
                return;
+
        num_planes = fb->format->num_planes;
        kmb_plane = to_kmb_plane(plane);
-       plane_id = kmb_plane->id;
 
        kmb = to_kmb(plane->dev);
+       plane_id = kmb_plane->id;
 
        spin_lock_irq(&kmb->irq_lock);
        if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
@@ -317,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        }
        spin_unlock_irq(&kmb->irq_lock);
 
-       src_w = (new_plane_state->src_w >> 16);
+       init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+       src_w = new_plane_state->src_w >> 16;
        src_h = new_plane_state->src_h >> 16;
        crtc_x = new_plane_state->crtc_x;
        crtc_y = new_plane_state->crtc_y;
@@ -400,20 +471,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
                config_csc(kmb, plane_id);
        }
 
+       kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
+
        kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
 
+       /* Configure LCD_CONTROL */
+       ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+
+       /* Set layer blending config */
+       ctrl &= ~LCD_CTRL_ALPHA_ALL;
+       ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
+               LCD_CTRL_ALPHA_BLEND_VL2;
+
+       ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
+
        switch (plane_id) {
        case LAYER_0:
-               ctrl = LCD_CTRL_VL1_ENABLE;
+               ctrl |= LCD_CTRL_VL1_ENABLE;
                break;
        case LAYER_1:
-               ctrl = LCD_CTRL_VL2_ENABLE;
+               ctrl |= LCD_CTRL_VL2_ENABLE;
                break;
        case LAYER_2:
-               ctrl = LCD_CTRL_GL1_ENABLE;
+               ctrl |= LCD_CTRL_GL1_ENABLE;
                break;
        case LAYER_3:
-               ctrl = LCD_CTRL_GL2_ENABLE;
+               ctrl |= LCD_CTRL_GL2_ENABLE;
                break;
        }
 
@@ -425,7 +508,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
         */
        ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
 
-       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
+       kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
 
        /* Enable pipeline AXI read transactions for the DMA
         * after setting graphics layers. This must be done
@@ -448,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
        /* Enable DMA */
        kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+       /* Save initial display config */
+       if (!init_disp_cfg->width ||
+           !init_disp_cfg->height ||
+           !init_disp_cfg->format) {
+               init_disp_cfg->width = width;
+               init_disp_cfg->height = height;
+               init_disp_cfg->format = fb->format->format;
+       }
+
        drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
                kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
 
@@ -490,6 +583,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
        enum drm_plane_type plane_type;
        const u32 *plane_formats;
        int num_plane_formats;
+       unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                 BIT(DRM_MODE_BLEND_PREMULTI)   |
+                                 BIT(DRM_MODE_BLEND_COVERAGE);
 
        for (i = 0; i < KMB_MAX_PLANES; i++) {
                plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
@@ -521,8 +617,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                drm_dbg(drm, "%s : %d i=%d type=%d",
                        __func__, __LINE__,
                          i, plane_type);
+               drm_plane_create_alpha_property(&plane->base_plane);
+
+               drm_plane_create_blend_mode_property(&plane->base_plane,
+                                                    blend_caps);
+
+               drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
+
                drm_plane_helper_add(&plane->base_plane,
                                     &kmb_plane_helper_funcs);
+
                if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
                        primary = plane;
                        kmb->plane = plane;
index 486490f7a3ec55b3dc63b680ae57fa4cfd141901..b51144044fe8ec0bcd564ee359487c6805e642b2 100644 (file)
@@ -35,6 +35,9 @@
 #define POSSIBLE_CRTCS 1
 #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
 
+#define POSSIBLE_CRTCS         1
+#define KMB_MAX_PLANES         2
+
 enum layer_id {
        LAYER_0,
        LAYER_1,
@@ -43,8 +46,6 @@ enum layer_id {
        /* KMB_MAX_PLANES */
 };
 
-#define KMB_MAX_PLANES 1
-
 enum sub_plane_id {
        Y_PLANE,
        U_PLANE,
@@ -62,6 +63,12 @@ struct layer_status {
        u32 ctrl;
 };
 
+struct disp_cfg {
+       unsigned int width;
+       unsigned int height;
+       unsigned int format;
+};
+
 struct kmb_plane *kmb_plane_init(struct drm_device *drm);
 void kmb_plane_destroy(struct drm_plane *plane);
 #endif /* __KMB_PLANE_H__ */
index 48150569f7025f64955f3e7690a383279e3738d0..9756101b0d32f8609191ed626197e5aec6982f17 100644 (file)
 #define LCD_CTRL_OUTPUT_ENABLED                          BIT(19)
 #define LCD_CTRL_BPORCH_ENABLE                   BIT(21)
 #define LCD_CTRL_FPORCH_ENABLE                   BIT(22)
+#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE       BIT(23)
 #define LCD_CTRL_PIPELINE_DMA                    BIT(28)
 #define LCD_CTRL_VHSYNC_IDLE_LVL                 BIT(31)
+#define LCD_CTRL_ALPHA_ALL                       (0xff << 6)
 
 /* interrupts */
 #define LCD_INT_STATUS                         (0x4 * 0x001)
 #define LCD_LAYER_ALPHA_EMBED                  BIT(5)
 #define LCD_LAYER_ALPHA_COMBI                  (LCD_LAYER_ALPHA_STATIC | \
                                                      LCD_LAYER_ALPHA_EMBED)
+#define LCD_LAYER_ALPHA_DISABLED               ~(LCD_LAYER_ALPHA_COMBI)
 /* RGB multiplied with alpha */
 #define LCD_LAYER_ALPHA_PREMULT                        BIT(6)
 #define LCD_LAYER_INVERT_COL                   BIT(7)
index 5f81489fc60c7959a003d17094a22224d6f40360..a4e80e4996748d84bb1e8e170718491c696ad35e 100644 (file)
@@ -4,8 +4,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -52,11 +50,8 @@ struct mtk_drm_crtc {
        bool                            pending_async_planes;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct mbox_client              cmdq_cl;
-       struct mbox_chan                *cmdq_chan;
-       struct cmdq_pkt                 cmdq_handle;
+       struct cmdq_client              *cmdq_client;
        u32                             cmdq_event;
-       u32                             cmdq_vblank_cnt;
 #endif
 
        struct device                   *mmsys_dev;
@@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
-                                   size_t size)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
 {
-       struct device *dev;
-       dma_addr_t dma_addr;
-
-       pkt->va_base = kzalloc(size, GFP_KERNEL);
-       if (!pkt->va_base) {
-               kfree(pkt);
-               return -ENOMEM;
-       }
-       pkt->buf_size = size;
-
-       dev = chan->mbox->dev;
-       dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
-                                 DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma_addr)) {
-               dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
-               kfree(pkt->va_base);
-               kfree(pkt);
-               return -ENOMEM;
-       }
-
-       pkt->pa_base = dma_addr;
-
-       return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
-{
-       dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
-                        DMA_TO_DEVICE);
-       kfree(pkt->va_base);
-       kfree(pkt);
-}
-
-static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
-{
-       struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
-       struct cmdq_cb_data *data = mssg;
-       struct mtk_crtc_state *state;
-       unsigned int i;
-
-       state = to_mtk_crtc_state(mtk_crtc->base.state);
-
-       state->pending_config = false;
-
-       if (mtk_crtc->pending_planes) {
-               for (i = 0; i < mtk_crtc->layer_nr; i++) {
-                       struct drm_plane *plane = &mtk_crtc->planes[i];
-                       struct mtk_plane_state *plane_state;
-
-                       plane_state = to_mtk_plane_state(plane->state);
-
-                       plane_state->pending.config = false;
-               }
-               mtk_crtc->pending_planes = false;
-       }
-
-       if (mtk_crtc->pending_async_planes) {
-               for (i = 0; i < mtk_crtc->layer_nr; i++) {
-                       struct drm_plane *plane = &mtk_crtc->planes[i];
-                       struct mtk_plane_state *plane_state;
-
-                       plane_state = to_mtk_plane_state(plane->state);
-
-                       plane_state->pending.async_config = false;
-               }
-               mtk_crtc->pending_async_planes = false;
-       }
-
-       mtk_crtc->cmdq_vblank_cnt = 0;
-       mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
+       cmdq_pkt_destroy(data.data);
 }
 #endif
 
@@ -453,8 +378,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                    state->pending_vrefresh, 0,
                                    cmdq_handle);
 
-               if (!cmdq_handle)
-                       state->pending_config = false;
+               state->pending_config = false;
        }
 
        if (mtk_crtc->pending_planes) {
@@ -474,12 +398,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       if (!cmdq_handle)
-                               plane_state->pending.config = false;
+                       plane_state->pending.config = false;
                }
-
-               if (!cmdq_handle)
-                       mtk_crtc->pending_planes = false;
+               mtk_crtc->pending_planes = false;
        }
 
        if (mtk_crtc->pending_async_planes) {
@@ -499,12 +420,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       if (!cmdq_handle)
-                               plane_state->pending.async_config = false;
+                       plane_state->pending.async_config = false;
                }
-
-               if (!cmdq_handle)
-                       mtk_crtc->pending_async_planes = false;
+               mtk_crtc->pending_async_planes = false;
        }
 }
 
@@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
                                       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+       struct cmdq_pkt *cmdq_handle;
 #endif
        struct drm_crtc *crtc = &mtk_crtc->base;
        struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
                mtk_mutex_release(mtk_crtc->mutex);
        }
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (mtk_crtc->cmdq_chan) {
-               mbox_flush(mtk_crtc->cmdq_chan, 2000);
-               cmdq_handle->cmd_buf_size = 0;
+       if (mtk_crtc->cmdq_client) {
+               mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+               cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
                cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
                cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
                mtk_crtc_ddp_config(crtc, cmdq_handle);
                cmdq_pkt_finalize(cmdq_handle);
-               dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
-                                           cmdq_handle->pa_base,
-                                           cmdq_handle->cmd_buf_size,
-                                           DMA_TO_DEVICE);
-               /*
-                * CMDQ command should execute in next vblank,
-                * If it fail to execute in next 2 vblank, timeout happen.
-                */
-               mtk_crtc->cmdq_vblank_cnt = 2;
-               mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
-               mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
+               cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
        }
 #endif
        mtk_crtc->config_updating = false;
@@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data)
        struct mtk_drm_private *priv = crtc->dev->dev_private;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
-               mtk_crtc_ddp_config(crtc, NULL);
-       else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
-               DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
-                         drm_crtc_index(&mtk_crtc->base));
+       if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
 #else
        if (!priv->data->shadow_register)
-               mtk_crtc_ddp_config(crtc, NULL);
 #endif
+               mtk_crtc_ddp_config(crtc, NULL);
+
        mtk_drm_finish_page_flip(mtk_crtc);
 }
 
@@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        mutex_init(&mtk_crtc->hw_lock);
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
-       mtk_crtc->cmdq_cl.tx_block = false;
-       mtk_crtc->cmdq_cl.knows_txdone = true;
-       mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
-       mtk_crtc->cmdq_chan =
-                       mbox_request_channel(&mtk_crtc->cmdq_cl,
-                                             drm_crtc_index(&mtk_crtc->base));
-       if (IS_ERR(mtk_crtc->cmdq_chan)) {
+       mtk_crtc->cmdq_client =
+                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                                        drm_crtc_index(&mtk_crtc->base));
+       if (IS_ERR(mtk_crtc->cmdq_client)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
                        drm_crtc_index(&mtk_crtc->base));
-               mtk_crtc->cmdq_chan = NULL;
+               mtk_crtc->cmdq_client = NULL;
        }
 
-       if (mtk_crtc->cmdq_chan) {
+       if (mtk_crtc->cmdq_client) {
                ret = of_property_read_u32_index(priv->mutex_node,
                                                 "mediatek,gce-events",
                                                 drm_crtc_index(&mtk_crtc->base),
@@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                if (ret) {
                        dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
                                drm_crtc_index(&mtk_crtc->base));
-                       mbox_free_channel(mtk_crtc->cmdq_chan);
-                       mtk_crtc->cmdq_chan = NULL;
-               } else {
-                       ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
-                                                      &mtk_crtc->cmdq_handle,
-                                                      PAGE_SIZE);
-                       if (ret) {
-                               dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
-                                       drm_crtc_index(&mtk_crtc->base));
-                               mbox_free_channel(mtk_crtc->cmdq_chan);
-                               mtk_crtc->cmdq_chan = NULL;
-                       }
+                       cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+                       mtk_crtc->cmdq_client = NULL;
                }
        }
 #endif
index e9c6af78b1d7c340d996f85644a3fbb39d8aa9f9..3ddf739a6f9b8b10dd5f49977cfbd7d9ac1f5c48 100644 (file)
@@ -17,7 +17,7 @@ config DRM_MSM
        select DRM_SCHED
        select SHMEM
        select TMPFS
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
@@ -55,7 +55,7 @@ config DRM_MSM_GPU_SUDO
 
 config DRM_MSM_HDMI_HDCP
        bool "Enable HDMI HDCP support in MSM DRM driver"
-       depends on DRM_MSM && QCOM_SCM
+       depends on DRM_MSM
        default y
        help
          Choose this option to enable HDCP state machine
index 4534633fe7cdb267718cbf58efef5ab5554203a4..8fb847c174ff840a26d778df02f1532221456304 100644 (file)
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-       ret = IS_ERR(icc_path);
-       if (ret)
+       if (IS_ERR(icc_path)) {
+               ret = PTR_ERR(icc_path);
                goto fail;
+       }
 
        ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-       ret = IS_ERR(ocmem_icc_path);
-       if (ret) {
+       if (IS_ERR(ocmem_icc_path)) {
+               ret = PTR_ERR(ocmem_icc_path);
                /* allow -ENODATA, ocmem icc is optional */
                if (ret != -ENODATA)
                        goto fail;
index 82bebb40234de14016e4e25768690706d8120283..a96ee79cc5e0886029b2ef94216562ace0d83c3e 100644 (file)
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-       ret = IS_ERR(icc_path);
-       if (ret)
+       if (IS_ERR(icc_path)) {
+               ret = PTR_ERR(icc_path);
                goto fail;
+       }
 
        ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-       ret = IS_ERR(ocmem_icc_path);
-       if (ret) {
+       if (IS_ERR(ocmem_icc_path)) {
+               ret = PTR_ERR(ocmem_icc_path);
                /* allow -ENODATA, ocmem icc is optional */
                if (ret != -ENODATA)
                        goto fail;
index a7c58018959fbf28266e0f406a1e47a8b45e0be2..8b73f70766a4771771f2dd04cb98822554c3e6e8 100644 (file)
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
        u32 val;
        int request, ack;
 
+       WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return -EINVAL;
 
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
        int bit;
 
+       WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return;
 
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
        if (!pdev)
                return -ENODEV;
 
+       mutex_init(&gmu->lock);
+
        gmu->dev = &pdev->dev;
 
        of_dma_configure(gmu->dev, node, true);
index 3c74f64e31262cc9a13e4bd9b944542cbf8b1269..84bd516f01e895b27a54756463427779f54eaa43 100644 (file)
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
 struct a6xx_gmu {
        struct device *dev;
 
+       /* For serializing communication with the GMU: */
+       struct mutex lock;
+
        struct msm_gem_address_space *aspace;
 
        void * __iomem mmio;
index 40c9fef457a49122b2bfa24570f1a477d13748a4..267a880811d654c78ba89de035fd660026afe898 100644 (file)
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
        u32 asid;
        u64 memptr = rbmemptr(ring, ttbr0);
 
-       if (ctx == a6xx_gpu->cur_ctx)
+       if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
                return;
 
        if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
        OUT_RING(ring, 0x31);
 
-       a6xx_gpu->cur_ctx = ctx;
+       a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
          A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
          A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
 
-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
 
-       a6xx_gpu->cur_ctx = NULL;
+       a6xx_gpu->cur_ctx_seqno = 0;
 
        /* Enable the SQE_to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ out:
        return ret;
 }
 
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       int ret;
+
+       mutex_lock(&a6xx_gpu->gmu.lock);
+       ret = hw_init(gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+
+       return ret;
+}
+
 static void a6xx_dump(struct msm_gpu *gpu)
 {
        DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
        trace_msm_gpu_resume(0);
 
+       mutex_lock(&a6xx_gpu->gmu.lock);
        ret = a6xx_gmu_resume(a6xx_gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
        if (ret)
                return ret;
 
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
        msm_devfreq_suspend(gpu);
 
+       mutex_lock(&a6xx_gpu->gmu.lock);
        ret = a6xx_gmu_stop(a6xx_gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
        if (ret)
                return ret;
 
@@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-       static DEFINE_MUTEX(perfcounter_oob);
 
-       mutex_lock(&perfcounter_oob);
+       mutex_lock(&a6xx_gpu->gmu.lock);
 
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
        *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+                           REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-       mutex_unlock(&perfcounter_oob);
+
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+
        return 0;
 }
 
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+       mutex_lock(&a6xx_gpu->gmu.lock);
+       a6xx_gmu_set_freq(gpu, opp);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
 static struct msm_gem_address_space *
 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
                .gpu_busy = a6xx_gpu_busy,
                .gpu_get_freq = a6xx_gmu_get_freq,
-               .gpu_set_freq = a6xx_gmu_set_freq,
+               .gpu_set_freq = a6xx_gpu_set_freq,
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
@@ -1810,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
                        adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
                adreno_gpu->base.hw_apriv = true;
 
+       /*
+        * For now only clamp to idle freq for devices where this is known not
+        * to cause power supply issues:
+        */
+       if (info && (info->revn == 618))
+               gpu->clamp_to_idle = true;
+
        a6xx_llc_slices_init(pdev, a6xx_gpu);
 
        ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
index 0bc2d062f54ab15a52e925a30ef0de130af92e83..8e5527c881b1e83e820b6919f6e4ea18916510b6 100644 (file)
@@ -19,7 +19,16 @@ struct a6xx_gpu {
        uint64_t sqe_iova;
 
        struct msm_ringbuffer *cur_ring;
-       struct msm_file_private *cur_ctx;
+
+       /**
+        * cur_ctx_seqno:
+        *
+        * The ctx->seqno value of the context with current pgtables
+        * installed.  Tracked by seqno rather than pointer value to
+        * avoid dangling pointers, and cases where a ctx can be freed
+        * and a new one created with the same address.
+        */
+       int cur_ctx_seqno;
 
        struct a6xx_gmu gmu;
 
index b131fd376192b6a9aede1cb4cdea0af7efe65910..700d65e39feb02471d7520b6ae9b2c1f82901a0e 100644 (file)
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
        PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
-                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
 };
 
index f482e0911d039d3dce49b263e934a0ee201d2ce5..bb7d066618e6490bbffbb2cc16aedc0438be79fe 100644 (file)
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
        __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
 }
 
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = mdp5_crtc_destroy,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = mdp5_crtc_reset,
+       .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+       .atomic_destroy_state = mdp5_crtc_destroy_state,
+       .atomic_print_state = mdp5_crtc_atomic_print_state,
+       .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+       .enable_vblank  = msm_crtc_enable_vblank,
+       .disable_vblank = msm_crtc_disable_vblank,
+       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
        mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
 
        drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+                                 cursor_plane ?
+                                 &mdp5_crtc_no_lm_cursor_funcs :
                                  &mdp5_crtc_funcs, NULL);
 
        drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
index fbe4c2cd52a3d9e2cc947801b206c05b1297268f..a0392e4d8134c3ad98f5b35aae65aec846236d16 100644 (file)
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
         * can not declared display is connected unless
         * HDMI cable is plugged in and sink_count of
         * dongle become 1
+        * also only signal audio when disconnected
         */
-       if (dp->link->sink_count)
+       if (dp->link->sink_count) {
                dp->dp_display.is_connected = true;
-       else
+       } else {
                dp->dp_display.is_connected = false;
-
-       dp_display_handle_plugged_change(g_dp_display,
-                               dp->dp_display.is_connected);
+               dp_display_handle_plugged_change(g_dp_display, false);
+       }
 
        DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
                        dp->link->sink_count, dp->dp_display.is_connected,
index 614dc7f26f2c8189e7510367fe9c4137da3823ab..75ae3008b68f4a2bb81f2cb4a439a42174c8d199 100644 (file)
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
-       if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+       if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+               ret = -EINVAL;
                goto fail;
+       }
 
        msm_dsi->encoder = encoder;
 
index e269df285136c2c9d56fd0e171e336f0f46b209b..c86b5090fae60db7bc854496fffbdb368eacadaa 100644 (file)
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 
        return 0;
 err:
-       for (; i > 0; i--)
+       while (--i >= 0)
                clk_disable_unprepare(msm_host->bus_clks[i]);
 
        return ret;
index d13552b2213b695ae9e970a310f2f487134375ec..5b4e991f220d684f3bcc3abb23d9c1df210dbb24 100644 (file)
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
 static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
                                    u32 nb_tries, u32 timeout_us)
 {
-       bool pll_locked = false;
+       bool pll_locked = false, pll_ready = false;
        void __iomem *base = pll_14nm->phy->pll_base;
        u32 tries, val;
 
        tries = nb_tries;
        while (tries--) {
-               val = dsi_phy_read(base +
-                              REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+               val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
                pll_locked = !!(val & BIT(5));
 
                if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
                udelay(timeout_us);
        }
 
-       if (!pll_locked) {
-               tries = nb_tries;
-               while (tries--) {
-                       val = dsi_phy_read(base +
-                               REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-                       pll_locked = !!(val & BIT(0));
+       if (!pll_locked)
+               goto out;
 
-                       if (pll_locked)
-                               break;
+       tries = nb_tries;
+       while (tries--) {
+               val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+               pll_ready = !!(val & BIT(0));
 
-                       udelay(timeout_us);
-               }
+               if (pll_ready)
+                       break;
+
+               udelay(timeout_us);
        }
 
-       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+out:
+       DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
 
-       return pll_locked;
+       return pll_locked && pll_ready;
 }
 
 static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
index aaa37456f4ee3cf55831cac498823454b1e2f1aa..71ed4aa0dc67e6c7ce1707a3094f8be221166395 100644 (file)
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
        bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
 
        snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
 
        bytediv_init.name = clk_name;
        bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
                return ret;
        provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
 
-       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
        /* DIV3 */
        hw = devm_clk_hw_register_divider(dev, clk_name,
                                parent_name, 0, pll_28nm->phy->pll_base +
index 4fb397ee7c8425c7809cc12e2f4d502faee4b5d1..fe1366b4c49f58083b554de380812b4dacbd762e 100644 (file)
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
 int msm_edp_ctrl_init(struct msm_edp *edp)
 {
        struct edp_ctrl *ctrl = NULL;
-       struct device *dev = &edp->pdev->dev;
+       struct device *dev;
        int ret;
 
        if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
                return -EINVAL;
        }
 
+       dev = &edp->pdev->dev;
        ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return -ENOMEM;
index 2e6fc185e54da4b3e8a79a8cb5fcfd873bf76ce6..d4e09703a87dbb0db3333636eb0019f03016347f 100644 (file)
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        if (ret)
                goto err_msm_uninit;
 
-       ret = msm_disp_snapshot_init(ddev);
-       if (ret)
-               DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
+       if (kms) {
+               ret = msm_disp_snapshot_init(ddev);
+               if (ret)
+                       DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+       }
        drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)
 
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+       static atomic_t ident = ATOMIC_INIT(0);
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx;
 
@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
        if (!ctx)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&ctx->submitqueues);
+       rwlock_init(&ctx->queuelock);
+
        kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
 
        ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
 
+       ctx->seqno = atomic_inc_return(&ident);
+
        return 0;
 }
 
index 8b005d1ac89974aa61b7ee960c68750af2d07b26..c552f0c3890c1c6107bbb9b70c2de50cbaeae3d9 100644 (file)
@@ -53,14 +53,6 @@ struct msm_disp_state;
 
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 
-struct msm_file_private {
-       rwlock_t queuelock;
-       struct list_head submitqueues;
-       int queueid;
-       struct msm_gem_address_space *aspace;
-       struct kref ref;
-};
-
 enum msm_mdp_plane_property {
        PLANE_PROP_ZPOS,
        PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 void msm_rmw(void __iomem *addr, u32 mask, u32 or);
 
-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
-               u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
-               struct msm_file_private *ctx,
-               u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
-               struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-       struct msm_file_private *ctx = container_of(kref,
-               struct msm_file_private, ref);
-
-       msm_gem_address_space_put(ctx->aspace);
-       kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
-       kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
-       struct msm_file_private *ctx)
-{
-       kref_get(&ctx->ref);
-       return ctx;
-}
-
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
 static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 {
        ktime_t now = ktime_get();
-       unsigned long remaining_jiffies;
+       s64 remaining_jiffies;
 
        if (ktime_compare(*timeout, now) < 0) {
                remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
                remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
        }
 
-       return remaining_jiffies;
+       return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
 }
 
 #endif /* __MSM_DRV_H__ */
index fdc5367aecaa35190aa1d1afc0172b55ceb18368..151d19e4453cd477233d00cd4c102a26e3c33925 100644 (file)
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
        if (!submit)
                return ERR_PTR(-ENOMEM);
 
-       ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+       ret = drm_sched_job_init(&submit->base, queue->entity, queue);
        if (ret) {
                kfree(submit);
                return ERR_PTR(ret);
@@ -171,7 +171,8 @@ out:
 static int submit_lookup_cmds(struct msm_gem_submit *submit,
                struct drm_msm_gem_submit *args, struct drm_file *file)
 {
-       unsigned i, sz;
+       unsigned i;
+       size_t sz;
        int ret = 0;
 
        for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        /* The scheduler owns a ref now: */
        msm_gem_submit_get(submit);
 
-       drm_sched_entity_push_job(&submit->base, &queue->entity);
+       drm_sched_entity_push_job(&submit->base, queue->entity);
 
        args->fence = submit->fence_id;
 
index 0e4b45bff2e6e1b1d960e7cbbaed71c46ff4a8d9..ee25d556c8a10c99dfbc0f03fba1c253ffbb0c95 100644 (file)
@@ -203,6 +203,10 @@ struct msm_gpu {
        uint32_t suspend_count;
 
        struct msm_gpu_state *crashstate;
+
+       /* Enable clamping to idle freq when inactive: */
+       bool clamp_to_idle;
+
        /* True if the hardware supports expanded apriv (a650 and newer) */
        bool hw_apriv;
 
@@ -257,6 +261,39 @@ struct msm_gpu_perfcntr {
  */
 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
 
+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock:    synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid:      counter incremented each time a submitqueue is created,
+ *                used to assign &msm_gpu_submitqueue.id
+ * @aspace:       the per-process GPU address-space
+ * @ref:          reference count
+ * @seqno:        unique per process seqno
+ */
+struct msm_file_private {
+       rwlock_t queuelock;
+       struct list_head submitqueues;
+       int queueid;
+       struct msm_gem_address_space *aspace;
+       struct kref ref;
+       int seqno;
+
+       /**
+        * entities:
+        *
+        * Table of per-priority-level sched entities used by submitqueues
+        * associated with this &drm_file.  Because some userspace apps
+        * make assumptions about rendering from multiple gl contexts
+        * (of the same priority) within the process happening in FIFO
+        * order without requiring any fencing beyond MakeCurrent(), we
+        * create at most one &drm_sched_entity per-process per-priority-
+        * level.
+        */
+       struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -304,6 +341,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 }
 
 /**
+ * struct msm_gpu_submitqueues - Userspace created context.
+ *
  * A submitqueue is associated with a gl context or vk queue (or equiv)
  * in userspace.
  *
@@ -321,7 +360,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  *             seqno, protected by submitqueue lock
  * @lock:      submitqueue lock
  * @ref:       reference count
- * @entity: the submit job-queue
+ * @entity:    the submit job-queue
  */
 struct msm_gpu_submitqueue {
        int id;
@@ -333,7 +372,7 @@ struct msm_gpu_submitqueue {
        struct idr fence_idr;
        struct mutex lock;
        struct kref ref;
-       struct drm_sched_entity entity;
+       struct drm_sched_entity *entity;
 };
 
 struct msm_gpu_state_bo {
@@ -421,6 +460,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+               u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+               struct msm_file_private *ctx,
+               u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+               struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+       kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+       struct msm_file_private *ctx)
+{
+       kref_get(&ctx->ref);
+       return ctx;
+}
+
 void msm_devfreq_init(struct msm_gpu *gpu);
 void msm_devfreq_cleanup(struct msm_gpu *gpu);
 void msm_devfreq_resume(struct msm_gpu *gpu);
index 0a1ee20296a2cddc4b2d1c099d225d395a89ba3c..20006d060b5b5564532710a727bfa08caed96818 100644 (file)
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
        unsigned int idle_time;
        unsigned long target_freq = df->idle_freq;
 
+       if (!df->devfreq)
+               return;
+
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        unsigned long idle_freq, target_freq = 0;
 
+       if (!df->devfreq)
+               return;
+
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -194,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 
        idle_freq = get_freq(gpu);
 
-       msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+       if (gpu->clamp_to_idle)
+               msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
 
        df->idle_time = ktime_get();
        df->idle_freq = idle_freq;
index 32a55d81b58b6d3d28e815a06256ad0931ca287b..b8621c6e055460caaf1bc2779945702670af9da1 100644 (file)
@@ -7,6 +7,24 @@
 
 #include "msm_gpu.h"
 
+void __msm_file_private_destroy(struct kref *kref)
+{
+       struct msm_file_private *ctx = container_of(kref,
+               struct msm_file_private, ref);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+               if (!ctx->entities[i])
+                       continue;
+
+               drm_sched_entity_destroy(ctx->entities[i]);
+               kfree(ctx->entities[i]);
+       }
+
+       msm_gem_address_space_put(ctx->aspace);
+       kfree(ctx);
+}
+
 void msm_submitqueue_destroy(struct kref *kref)
 {
        struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
 
        idr_destroy(&queue->fence_idr);
 
-       drm_sched_entity_destroy(&queue->entity);
-
        msm_file_private_put(queue->ctx);
 
        kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
        }
 }
 
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+                unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+       static DEFINE_MUTEX(entity_lock);
+       unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+       /* We should have already validated that the requested priority is
+        * valid by the time we get here.
+        */
+       if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+               return ERR_PTR(-EINVAL);
+
+       mutex_lock(&entity_lock);
+
+       if (!ctx->entities[idx]) {
+               struct drm_sched_entity *entity;
+               struct drm_gpu_scheduler *sched = &ring->sched;
+               int ret;
+
+               entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+
+               ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+               if (ret) {
+                       kfree(entity);
+                       return ERR_PTR(ret);
+               }
+
+               ctx->entities[idx] = entity;
+       }
+
+       mutex_unlock(&entity_lock);
+
+       return ctx->entities[idx];
+}
+
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id)
 {
        struct msm_drm_private *priv = drm->dev_private;
        struct msm_gpu_submitqueue *queue;
-       struct msm_ringbuffer *ring;
-       struct drm_gpu_scheduler *sched;
        enum drm_sched_priority sched_prio;
        unsigned ring_nr;
        int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        queue->flags = flags;
        queue->ring_nr = ring_nr;
 
-       ring = priv->gpu->rb[ring_nr];
-       sched = &ring->sched;
-
-       ret = drm_sched_entity_init(&queue->entity,
-                       sched_prio, &sched, 1, NULL);
-       if (ret) {
+       queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+                                        ring_nr, sched_prio);
+       if (IS_ERR(queue->entity)) {
+               ret = PTR_ERR(queue->entity);
                kfree(queue);
                return ret;
        }
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
         */
        default_prio = DIV_ROUND_UP(max_priority, 2);
 
-       INIT_LIST_HEAD(&ctx->submitqueues);
-
-       rwlock_init(&ctx->queuelock);
-
        return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
 }
 
index ec0432fe1bdf85722ad0b36ea5fb21104aec19d2..86d78634a9799012e70b82ac22043fa6e476c720 100644 (file)
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
        struct mxsfb_drm_private *mxsfb = drm->dev_private;
 
        mxsfb_enable_axi_clk(mxsfb);
-       mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+       /* Disable and clear VBLANK IRQ */
+       writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+       writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
        mxsfb_disable_axi_clk(mxsfb);
 }
 
index b8c31b697797ee294d9804621314a4e61ffa5593..66f32d965c7239496f38f783ddebf2ca0f5b0e33 100644 (file)
@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
+       .release = single_release,
 };
 
 int nv50_head_crc_late_register(struct nv50_head *head)
index d66f97280282a3a2f2a85299280f01a5a174c06f..72099d1e4816902f50a623107af7c64edc6f29aa 100644 (file)
@@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head,
 void
 nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
+       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        if (asyh->set.view   ) head->func->view    (head, asyh);
        if (asyh->set.mode   ) head->func->mode    (head, asyh);
        if (asyh->set.core   ) head->func->core_set(head, asyh);
-       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
        if (asyh->set.dither ) head->func->dither  (head, asyh);
index c68cc957248e2b268f7fcdc2b0f77b2cf4d4a948..a582c0cb0cb0d2c5919a0b6a2cbbaf685df1651e 100644 (file)
@@ -71,6 +71,7 @@
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
 #define VOLTA_CHANNEL_GPFIFO_A                        /* clc36f.h */ 0x0000c36f
 #define TURING_CHANNEL_GPFIFO_A                       /* clc36f.h */ 0x0000c46f
+#define AMPERE_CHANNEL_GPFIFO_B                       /* clc36f.h */ 0x0000c76f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
 #define VOLTA_DMA_COPY_A                                             0x0000c3b5
 #define TURING_DMA_COPY_A                                            0x0000c5b5
+#define AMPERE_DMA_COPY_B                                            0x0000c7b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
index 54fab7cc36c1b84ba925ffa78a846fe0918a02bc..64ee82c7c1be5936de87c8ab155b47920fbe91ff 100644 (file)
@@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 #endif
index 6d07e653f82d5b74a330eadea83ea0a4074da7a8..c58bcdba2c7aa30d2423390b10020f750dd5df96 100644 (file)
@@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_resource *, struct ttm_resource *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
index 80099ef7570226b65ce3fa98bc4b2e19999dedcb..ea7769135b0dcf0cd23aa43e4fb2f3fde31e41e7 100644 (file)
@@ -250,7 +250,8 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
-       static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B,
+                                       TURING_CHANNEL_GPFIFO_A,
                                        VOLTA_CHANNEL_GPFIFO_A,
                                        PASCAL_CHANNEL_GPFIFO_A,
                                        MAXWELL_CHANNEL_GPFIFO_A,
@@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
        nvif_object_map(&chan->user, NULL, 0);
 
-       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
+           chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
                ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
                                       nouveau_channel_killed,
                                       true, NV906F_V0_NTFY_KILLED,
index c2bc05eb2e54a2e2dbcbdd4eb13d8cae56f7a770..1cbe01048b9304fe838252fbd70c2217b60dda8d 100644 (file)
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
        .open = nouveau_debugfs_pstate_open,
        .read = seq_read,
        .write = nouveau_debugfs_pstate_set,
+       .release = single_release,
 };
 
 static struct drm_info_list nouveau_debugfs_list[] = {
index 1f828c9f691cd5e1e375087ff2a1a90dcc31c731..6109cd9e339918f5798787aad76cd3f1e69961f6 100644 (file)
@@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
        u32 arg0, arg1;
        int ret;
 
+       if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
+               return;
+
        /* Allocate channel that has access to the graphics engine. */
        if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
                arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
@@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case PASCAL_CHANNEL_GPFIFO_A:
                case VOLTA_CHANNEL_GPFIFO_A:
                case TURING_CHANNEL_GPFIFO_A:
+               case AMPERE_CHANNEL_GPFIFO_B:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
index 5b27845075a1c37bca5ab637aa4ff7b14c5e5c9e..8c2ecc282723222a64880e5c782c5dc4224aefb8 100644 (file)
@@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        }
 
        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
-       if (ret) {
-               nouveau_bo_ref(NULL, &nvbo);
+       if (ret)
                return ret;
-       }
 
        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possibly on
index 7c9c928c319668d48b11d0081f0c60153b36c3c6..c3526a8622e3e204adc453f6226cc2abeea2755f 100644 (file)
@@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_new = nv84_fence_context_new;
        priv->base.context_del = nv84_fence_context_del;
 
-       priv->base.uevent = true;
+       priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE;
 
        mutex_init(&priv->mutex);
 
index 93ddf63d111408ecc354b54eb3e682278899b84e..ca75c5f6ecaf80a96429b5a6a81432183ccd02f5 100644 (file)
@@ -2602,6 +2602,7 @@ nv172_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2622,6 +2623,7 @@ nv174_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2642,6 +2644,7 @@ nv177_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static int
index 3209eb7af65fb35e4b4dd70b33f77cd592c726e4..5e831d347a95795f3b22217b532afd4ceeb81714 100644 (file)
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
 nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
index 353b77d9b3dcffe35f12172e36e24b75cd454950..3492c561f2cfc858c4f776e1fd3d754d0136d544 100644 (file)
@@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
        if (offset < 0)
                return 0;
 
-       engn = fifo->base.func->engine_id(&fifo->base, engine);
+       engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
        save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
        nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
        done = nvkm_msec(device, 2000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644 (file)
index 0000000..c630dbd
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+       struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+       struct nvkm_object object;
+
+       struct {
+               u32 runl;
+               u32 chan;
+       } ctrl;
+
+       struct nvkm_memory *mthd;
+       struct nvkm_memory *inst;
+       struct nvkm_memory *user;
+       struct nvkm_memory *runl;
+
+       struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+       if (index == 0) {
+               oclass->ctor = nvkm_object_new;
+               oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+              enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+       u64 bar2 = nvkm_memory_bar2(chan->user);
+
+       if (bar2 == ~0ULL)
+               return -EFAULT;
+
+       *type = NVKM_OBJECT_MAP_IO;
+       *addr = device->func->resource_addr(device, 3) + bar2;
+       *size = 0x1000;
+       return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+                       break;
+       );
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+       return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+       nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+       return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+
+       if (chan->vmm) {
+               nvkm_vmm_part(chan->vmm, chan->inst);
+               nvkm_vmm_unref(&chan->vmm);
+       }
+
+       nvkm_memory_unref(&chan->runl);
+       nvkm_memory_unref(&chan->user);
+       nvkm_memory_unref(&chan->inst);
+       nvkm_memory_unref(&chan->mthd);
+       return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+       .dtor = ga102_chan_dtor,
+       .init = ga102_chan_init,
+       .fini = ga102_chan_fini,
+       .map = ga102_chan_map,
+       .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       struct volta_channel_gpfifo_a_v0 *args = argv;
+       struct nvkm_top_device *tdev;
+       struct nvkm_vmm *vmm;
+       struct ga102_chan *chan;
+       int ret;
+
+       if (argc != sizeof(*args))
+               return -ENOSYS;
+
+       vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+       if (IS_ERR(vmm))
+               return PTR_ERR(vmm);
+
+       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+       *pobject = &chan->object;
+
+       list_for_each_entry(tdev, &device->top->device, head) {
+               if (tdev->type == NVKM_ENGINE_CE) {
+                       chan->ctrl.runl = tdev->runlist;
+                       break;
+               }
+       }
+
+       if (!chan->ctrl.runl)
+               return -ENODEV;
+
+       chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+       args->chid = 0;
+       args->inst = 0;
+       args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->inst);
+       nvkm_wo32(chan->inst, 0x010, 0x0000face);
+       nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+       nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+       nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+                                    (order_base_2(args->ilength / 8) << 16));
+       nvkm_wo32(chan->inst, 0x084, 0x20400000);
+       nvkm_wo32(chan->inst, 0x094, 0x30000001);
+       nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+       nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+       nvkm_wo32(chan->inst, 0x0e8, 0);
+       nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+       nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+       nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+       nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_done(chan->inst);
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->runl);
+       nvkm_wo32(chan->runl, 0x00, 0x80030001);
+       nvkm_wo32(chan->runl, 0x04, 1);
+       nvkm_wo32(chan->runl, 0x08, 0);
+       nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+       nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_done(chan->runl);
+
+       ret = nvkm_vmm_join(vmm, chan->inst);
+       if (ret)
+               return ret;
+
+       chan->vmm = nvkm_vmm_ref(vmm);
+       return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+       .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+       .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+       if (index == 0) {
+               oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+               *class = &ga102_user_oclass;
+               return 0;
+       } else
+       if (index == 1) {
+               oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+               *class = &ga102_chan_oclass;
+               return 0;
+       }
+
+       return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+       switch (mthd) {
+       case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+       return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+       .dtor = ga102_fifo_dtor,
+       .info = ga102_fifo_info,
+       .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+              struct nvkm_fifo **pfifo)
+{
+       struct ga102_fifo *fifo;
+
+       if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+       *pfifo = &fifo->base;
+       return 0;
+}
index 31933f3e5a076b36e14fadf75cbef8f39a7815fc..c982d834c8d98579dd436eb632a6086c84f594d0 100644 (file)
@@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top)
                        info->reset   = (data & 0x0000001f);
                        break;
                case 2:
-                       info->runlist = (data & 0x0000fc00) >> 10;
+                       info->runlist = (data & 0x00fffc00);
                        info->engine  = (data & 0x00000003);
                        break;
                default:
@@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top)
                }
 
                nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
-                                  "runlist %2d engine %2d reset %2d\n", type, inst,
+                                  "runlist %6x engine %2d reset %2d\n", type, inst,
                           info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
-                          info->addr, info->fault, info->runlist, info->engine, info->reset);
+                          info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
+                          info->engine, info->reset);
                info = NULL;
        }
 
index beb581b96ecdc6f9dbdbc64676137de854428cd4..418638e6e3b0a06e1302ccdf682b7ea99232760d 100644 (file)
@@ -295,6 +295,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
        depends on OF
        depends on I2C
        depends on BACKLIGHT_CLASS_DEVICE
+       select CRC32
        help
          The panel is used with different sizes LCDs, from 480x272 to
          1280x800, and 24 bit per pixel.
index 2d8794d495d08e62d749909f0e87ba040843e01e..3d8a9ab47cae2face971d4deb8ed6863afc9ee71 100644 (file)
@@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = {
        { 0x09, REG09_SUB_BRIGHT_R(0x20) },
        { 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
        { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
-       { 0x0c, REG0C_CONTRAST_R(0x10) },
-       { 0x0d, REG0D_CONTRAST_G(0x10) },
+       { 0x0c, REG0C_CONTRAST_R(0x00) },
+       { 0x0d, REG0D_CONTRAST_G(0x00) },
        { 0x0e, REG0E_CONTRAST_B(0x10) },
        { 0x0f, 0 },
        { 0x10, REG10_BRIGHT(0x7f) },
index 0145129d7c661ce0e9ca1f095faec49b758df9a3..534dd7414d428cddead1d2ddfb23cba2c30cb504 100644 (file)
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
        .clock          = 69700,
 
        .hdisplay       = 800,
-       .hsync_start    = 800 + 6,
-       .hsync_end      = 800 + 6 + 15,
-       .htotal         = 800 + 6 + 15 + 16,
+       .hsync_start    = 800 + 52,
+       .hsync_end      = 800 + 52 + 8,
+       .htotal         = 800 + 52 + 8 + 48,
 
        .vdisplay       = 1280,
-       .vsync_start    = 1280 + 8,
-       .vsync_end      = 1280 + 8 + 48,
-       .vtotal         = 1280 + 8 + 48 + 52,
+       .vsync_start    = 1280 + 16,
+       .vsync_end      = 1280 + 16 + 6,
+       .vtotal         = 1280 + 16 + 6 + 15,
 
        .width_mm       = 135,
        .height_mm      = 217,
index 0ecccf25a3c760337773d2d0e843ec865423df32..d2a0f5394fef69e4b5f4a103139620c04a1c5cf1 100644 (file)
@@ -214,7 +214,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
        }
        ret = 0;
 
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_X86
        wbinvd();
 #else
        mb();
index 0daa8bba50f5a9097e861773ee725baf9bcb991d..4bf4e25d7f011fdacb1eabb1662f2e9fedf9d448 100644 (file)
@@ -86,12 +86,20 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
        }
 
        /*
-        * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+        * Create and initialize the encoder. On Gen3, skip the LVDS1 output if
         * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
-        * mode.
+        * mode, or any LVDS output if it isn't connected. The latter may happen
+        * on D3 or E3 as the LVDS encoders are needed to provide the pixel
+        * clock to the DU, even when the LVDS outputs are not used.
         */
-       if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
-               if (rcar_lvds_dual_link(bridge))
+       if (rcdu->info->gen >= 3) {
+               if (output == RCAR_DU_OUTPUT_LVDS1 &&
+                   rcar_lvds_dual_link(bridge))
+                       return -ENOLINK;
+
+               if ((output == RCAR_DU_OUTPUT_LVDS0 ||
+                    output == RCAR_DU_OUTPUT_LVDS1) &&
+                   !rcar_lvds_is_connected(bridge))
                        return -ENOLINK;
        }
 
index d061b8de748fdd44fe71b3628ade5e8d1b8e1a13..b672c5bd72ee888764ba58177729b2b3b8ef2378 100644 (file)
@@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
 {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
+       if (!lvds->next_bridge)
+               return 0;
+
        return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
                                 flags);
 }
@@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
 
+bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+       struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+       return lvds->next_bridge != NULL;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
+
 /* -----------------------------------------------------------------------------
  * Probe & Remove
  */
index 222ec0e60785ca31901d0a14cf8ce9ca89860cb3..eb7c6ef03b00afa402f15b9aac6cb4db30624cb3 100644 (file)
@@ -16,6 +16,7 @@ struct drm_bridge;
 int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
 void rcar_lvds_clk_disable(struct drm_bridge *bridge);
 bool rcar_lvds_dual_link(struct drm_bridge *bridge);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge);
 #else
 static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
                                       unsigned long freq)
@@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 {
        return false;
 }
+static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+       return false;
+}
 #endif /* CONFIG_DRM_RCAR_LVDS */
 
 #endif /* __RCAR_LVDS_H__ */
index ba9e14da41b4811a581c76e211a5c5b99f9e32d4..a25b98b7f5bd7875c8be067b5b58e95f8d9a820f 100644 (file)
@@ -1174,26 +1174,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
         *
         * Action plan:
         *
-        * 1. When DRM gives us a mode, we should add 999 Hz to it.  That way
-        *    if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
-        *    make 60000 kHz then the clock framework will actually give us
-        *    the right clock.
+        * 1. Try to set the exact rate first, and confirm the clock framework
+        *    can provide it.
         *
-        *    NOTE: if the PLL (maybe through a divider) could actually make
-        *    a clock rate 999 Hz higher instead of the one we want then this
-        *    could be a problem.  Unfortunately there's not much we can do
-        *    since it's baked into DRM to use kHz.  It shouldn't matter in
-        *    practice since Rockchip PLLs are controlled by tables and
-        *    even if there is a divider in the middle I wouldn't expect PLL
-        *    rates in the table that are just a few kHz different.
+        * 2. If the clock framework cannot provide the exact rate, we should
+        *    add 999 Hz to the requested rate.  That way if the clock we need
+        *    is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
+        *    the clock framework will actually give us the right clock.
         *
-        * 2. Get the clock framework to round the rate for us to tell us
+        * 3. Get the clock framework to round the rate for us to tell us
         *    what it will actually make.
         *
-        * 3. Store the rounded up rate so that we don't need to worry about
+        * 4. Store the rounded up rate so that we don't need to worry about
         *    this in the actual clk_set_rate().
         */
-       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
+       if (rate / 1000 != adjusted_mode->clock)
+               rate = clk_round_rate(vop->dclk,
+                                     adjusted_mode->clock * 1000 + 999);
        adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
 
        return true;
index f75fb157f2ff757da981031f438f80afda33d355..016b877051dabfc3b1b9eddc7189cb815896c7f1 100644 (file)
@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
+       ret = sun8i_hdmi_phy_init(hdmi->phy);
+       if (ret)
+               goto err_disable_clk_tmds;
+
        drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
        drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
-       sun8i_hdmi_phy_init(hdmi->phy);
-
        plat_data->mode_valid = hdmi->quirks->mode_valid;
        plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
        sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
+       sun8i_hdmi_phy_deinit(hdmi->phy);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
index 74f6ed0e25709f6ae3bbfebb430ae89bf75534cb..bffe1b9cd3dcb435f019d25668cd3e9c02beaf2c 100644 (file)
@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
        struct clk                      *clk_phy;
        struct clk                      *clk_pll0;
        struct clk                      *clk_pll1;
+       struct device                   *dev;
        unsigned int                    rcal;
        struct regmap                   *regs;
        struct reset_control            *rst_phy;
@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 
 int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
                            struct dw_hdmi_plat_data *plat_data);
 
index c9239708d398cdaa85ebffba7ca4a41ecba5a760..b64d93da651d22d03e74057018cea28d37004a02 100644 (file)
@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
        phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
 }
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
 {
+       int ret;
+
+       ret = reset_control_deassert(phy->rst_phy);
+       if (ret) {
+               dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(phy->clk_bus);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
+               goto err_assert_rst_phy;
+       }
+
+       ret = clk_prepare_enable(phy->clk_mod);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
+               goto err_disable_clk_bus;
+       }
+
+       if (phy->variant->has_phy_clk) {
+               ret = sun8i_phy_clk_create(phy, phy->dev,
+                                          phy->variant->has_second_pll);
+               if (ret) {
+                       dev_err(phy->dev, "Couldn't create the PHY clock\n");
+                       goto err_disable_clk_mod;
+               }
+
+               clk_prepare_enable(phy->clk_phy);
+       }
+
        phy->variant->phy_init(phy);
+
+       return 0;
+
+err_disable_clk_mod:
+       clk_disable_unprepare(phy->clk_mod);
+err_disable_clk_bus:
+       clk_disable_unprepare(phy->clk_bus);
+err_assert_rst_phy:
+       reset_control_assert(phy->rst_phy);
+
+       return ret;
+}
+
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
+{
+       clk_disable_unprepare(phy->clk_mod);
+       clk_disable_unprepare(phy->clk_bus);
+       clk_disable_unprepare(phy->clk_phy);
+
+       reset_control_assert(phy->rst_phy);
 }
 
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+       phy->dev = dev;
 
        ret = of_address_to_resource(node, 0, &res);
        if (ret) {
@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                goto err_put_clk_pll1;
        }
 
-       ret = reset_control_deassert(phy->rst_phy);
-       if (ret) {
-               dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
-               goto err_put_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_bus);
-       if (ret) {
-               dev_err(dev, "Cannot enable bus clock: %d\n", ret);
-               goto err_deassert_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_mod);
-       if (ret) {
-               dev_err(dev, "Cannot enable mod clock: %d\n", ret);
-               goto err_disable_clk_bus;
-       }
-
-       if (phy->variant->has_phy_clk) {
-               ret = sun8i_phy_clk_create(phy, dev,
-                                          phy->variant->has_second_pll);
-               if (ret) {
-                       dev_err(dev, "Couldn't create the PHY clock\n");
-                       goto err_disable_clk_mod;
-               }
-
-               clk_prepare_enable(phy->clk_phy);
-       }
-
        platform_set_drvdata(pdev, phy);
 
        return 0;
 
-err_disable_clk_mod:
-       clk_disable_unprepare(phy->clk_mod);
-err_disable_clk_bus:
-       clk_disable_unprepare(phy->clk_bus);
-err_deassert_rst_phy:
-       reset_control_assert(phy->rst_phy);
-err_put_rst_phy:
-       reset_control_put(phy->rst_phy);
 err_put_clk_pll1:
        clk_put(phy->clk_pll1);
 err_put_clk_pll0:
@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
        struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
-       clk_disable_unprepare(phy->clk_mod);
-       clk_disable_unprepare(phy->clk_bus);
-       clk_disable_unprepare(phy->clk_phy);
-
-       reset_control_assert(phy->rst_phy);
-
        reset_control_put(phy->rst_phy);
 
        clk_put(phy->clk_pll0);
index 16c7aabb94d3722d14dffa1971153a26703e47c6..a29d64f8756354fcc84db6f812a1a24906928b37 100644 (file)
@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
                                   bool prepare_bandwidth_transition)
 {
        const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
-       const struct tegra_dc_state *old_dc_state, *new_dc_state;
        u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
        const struct drm_plane_state *old_plane_state;
        const struct drm_crtc_state *old_crtc_state;
@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
                return;
 
        old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
-       old_dc_state = to_const_dc_state(old_crtc_state);
-       new_dc_state = to_const_dc_state(crtc->state);
 
        if (!crtc->state->active) {
                if (!old_crtc_state->active)
index f0cb691852a1c91335466d3e1e834c45ace668cd..40378308d527a1369ed0f19e644e506a8b45e145 100644 (file)
@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
        return NULL;
 }
 
-static inline const struct tegra_dc_state *
-to_const_dc_state(const struct drm_crtc_state *state)
-{
-       return to_dc_state((struct drm_crtc_state *)state);
-}
-
 struct tegra_dc_stats {
        unsigned long frames;
        unsigned long vblank;
index dc16a24f4dbe2df287fc296d5381686b0dc41f14..690a339c52ec6efebd6cbd5f5eff9529e26d5c10 100644 (file)
@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
                mapping->iova = sg_dma_address(mapping->sgt->sgl);
        }
 
-       mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+       mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
 
        err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
                       GFP_KERNEL);
index b4b4653fe301ed6866d61edb7b2ac4f057e8d153..ed8a4b7f8b6e28b903d3fc0a7543aad9035e2d92 100644 (file)
@@ -1395,14 +1395,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = {
-       SND_SOC_DAPM_OUTPUT("TX"),
-};
-
-static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
-       { "TX", NULL, "Playback" },
-};
-
 static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
        .name = "vc4-hdmi-cpu-dai-component",
 };
index 6941add95d0faddcf3b9412b6d5091d2b73e4dfa..ecab72882192903ef400da94cc0d95f8b7a48d31 100644 (file)
@@ -15,7 +15,7 @@
 #include "intr.h"
 #include "syncpt.h"
 
-DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock);
 
 struct host1x_syncpt_fence {
        struct dma_fence base;
@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
                return ERR_PTR(-ENOMEM);
 
        fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
-       if (!fence->waiter)
+       if (!fence->waiter) {
+               kfree(fence);
                return ERR_PTR(-ENOMEM);
+       }
 
        fence->sp = sp;
        fence->threshold = threshold;
index 79b138fd426197a2ba76f29e585c303b49537644..05c007b213f245ac202ab4399cd3e7c16dd4aab5 100644 (file)
@@ -255,13 +255,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
        if (!privdata->cl_data)
                return -ENOMEM;
 
-       rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+       mp2_select_ops(privdata);
+
+       rc = amd_sfh_hid_client_init(privdata);
        if (rc)
                return rc;
 
-       mp2_select_ops(privdata);
-
-       return amd_sfh_hid_client_init(privdata);
+       return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
 }
 
 static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
index 833fcf07ff35ad5bce4310def979e45e83983bc6..6ccfa0cb997aba82a1b3728c26ed34516b0cc828 100644 (file)
@@ -336,12 +336,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
 
 /*
  * MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
  */
 static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
        struct apple_sc *asc = hid_get_drvdata(hdev);
 
+       if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+               hid_info(hdev,
+                        "fixing up Magic Keyboard JIS report descriptor\n");
+               rdesc[64] = rdesc[70] = 0xe7;
+       }
+
        if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
                        rdesc[53] == 0x65 && rdesc[59] == 0x65) {
                hid_info(hdev,
index 0790fbd3fc9a26c8a4e7c518f541aac7bec5a10a..467d789f9bc2d3a0d846c661b618d1b640a63188 100644 (file)
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
 {
        struct betopff_device *betopff;
        struct hid_report *report;
-       struct hid_input *hidinput =
-                       list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int error;
        int i, j;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 95e0807878c7ea9137e4b29159bca278c761152b..d70cd3d7f583b25f5736e464f7e8bac09deb302c 100644 (file)
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
        }
 
        ret = u2fzero_recv(dev, &req, &resp);
-       if (ret < 0)
+
+       /* ignore errors or packets without data */
+       if (ret < offsetof(struct u2f_hid_msg, init.data))
                return 0;
 
        /* only take the minimum amount of data it is safe to take */
index fd51769d0994ba5e189870fe782819412b6a77ef..33a6908995b1be7818d9e9ee0a4869573da67627 100644 (file)
@@ -4746,6 +4746,12 @@ static const struct wacom_features wacom_features_0x393 =
        { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
          INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
          .touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+       { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+       { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4919,6 +4925,8 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x37A) },
        { USB_DEVICE_WACOM(0x37B) },
        { BT_DEVICE_WACOM(0x393) },
+       { BT_DEVICE_WACOM(0x3c6) },
+       { BT_DEVICE_WACOM(0x3c8) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 42f3d9d123a12dab1e83f5087d2f745a77b70423..d030577ad6a2c58c6bee4dc6421738a1bb06019f 100644 (file)
@@ -13,6 +13,7 @@
 #define _HYPERV_VMBUS_H
 
 #include <linux/list.h>
+#include <linux/bitops.h>
 #include <asm/sync_bitops.h>
 #include <asm/hyperv-tlfs.h>
 #include <linux/atomic.h>
index 38bc35ac8135eee0effae59e0512c9f78d47ba5a..3618a924e78e4a47dfb882c0c824e9a3e9c37b46 100644 (file)
@@ -362,12 +362,6 @@ static const struct hwmon_channel_info *k10temp_info[] = {
                           HWMON_T_INPUT | HWMON_T_LABEL,
                           HWMON_T_INPUT | HWMON_T_LABEL,
                           HWMON_T_INPUT | HWMON_T_LABEL),
-       HWMON_CHANNEL_INFO(in,
-                          HWMON_I_INPUT | HWMON_I_LABEL,
-                          HWMON_I_INPUT | HWMON_I_LABEL),
-       HWMON_CHANNEL_INFO(curr,
-                          HWMON_C_INPUT | HWMON_C_LABEL,
-                          HWMON_C_INPUT | HWMON_C_LABEL),
        NULL
 };
 
index bb3f7749a0b001cb21a668f62de984363671575b..5423466de697af5bf704217f26eaccea058f404d 100644 (file)
@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
                return ret;
 
        /* check external clock presence */
-       extclk = devm_clk_get(st->dev, NULL);
-       if (!IS_ERR(extclk)) {
+       extclk = devm_clk_get_optional(st->dev, NULL);
+       if (IS_ERR(extclk))
+               return dev_err_probe(st->dev, PTR_ERR(extclk),
+                                    "Failed to get external clock\n");
+
+       if (extclk) {
                unsigned long rate_hz;
                u8 pre = 0, div, tbctl;
                u64 aux;
index 116681fde33d2de2aeea021f3edc10e921e76eb5..89fe7b9fe26be3e837f879b2dad1a97fba6de9ea 100644 (file)
@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 {
        struct mlxreg_fan *fan = cdev->devdata;
        unsigned long cur_state;
+       int i, config = 0;
        u32 regval;
-       int i;
        int err;
 
        /*
@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
         * overwritten.
         */
        if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+               /*
+                * This is configuration change, which is only supported through sysfs.
+                * For configuration non-zero value is to be returned to avoid thermal
+                * statistics update.
+                */
+               config = 1;
                state -= MLXREG_FAN_MAX_STATE;
                for (i = 0; i < state; i++)
                        fan->cooling_levels[i] = state;
@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 
                cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
                if (state < cur_state)
-                       return 0;
+                       return config;
 
                state = cur_state;
        }
@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
                dev_err(fan->dev, "Failed to write PWM duty\n");
                return err;
        }
-       return 0;
+       return config;
 }
 
 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
index 0d68a78be980db1488de7140b9889807a4a5626e..ae664613289c45c81b3c6a40e374075fc51a3032 100644 (file)
@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
                if (val == OCC_TEMP_SENSOR_FAULT)
                        return -EREMOTEIO;
 
-               /*
-                * VRM doesn't return temperature, only alarm bit. This
-                * attribute maps to tempX_alarm instead of tempX_input for
-                * VRM
-                */
-               if (temp->fru_type != OCC_FRU_TYPE_VRM) {
-                       /* sensor not ready */
-                       if (val == 0)
-                               return -EAGAIN;
+               /* sensor not ready */
+               if (val == 0)
+                       return -EAGAIN;
 
-                       val *= 1000;
-               }
+               val *= 1000;
                break;
        case 2:
                val = temp->fru_type;
@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                             0, i);
                attr++;
 
-               if (sensors->temp.version > 1 &&
+               if (sensors->temp.version == 2 &&
                    temp->fru_type == OCC_FRU_TYPE_VRM) {
                        snprintf(attr->name, sizeof(attr->name),
                                 "temp%d_alarm", s);
index df712ce4b164dbe4c5a841616804c2a47d9a0df5..53f7d1418bc9077d05f624c1475611e31bcbb87e 100644 (file)
@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
                cmd = CFFPS_SN_CMD;
                break;
        case CFFPS_DEBUGFS_MAX_POWER_OUT:
-               rc = i2c_smbus_read_word_swapped(psu->client,
-                                                CFFPS_MAX_POWER_OUT_CMD);
+               if (psu->version == cffps1) {
+                       rc = i2c_smbus_read_word_swapped(psu->client,
+                                       CFFPS_MAX_POWER_OUT_CMD);
+               } else {
+                       rc = i2c_smbus_read_word_data(psu->client,
+                                       CFFPS_MAX_POWER_OUT_CMD);
+               }
+
                if (rc < 0)
                        return rc;
 
index eb94bd5f4e2a883925c55e47615ac543b00e07f6..51986adfbf47c56f653576f027ccbc4913e214be 100644 (file)
@@ -54,7 +54,7 @@
 
 #define MP2975_RAIL2_FUNC      (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
                                 PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
-                                PMBUS_PHASE_VIRTUAL)
+                                PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
 
 struct mp2975_data {
        struct pmbus_driver_info info;
index ede66ea6a730dd82787c05cec909c920d4ba5d2c..b963a369c5ab3069aa55568f706d4dc1af4aeeeb 100644 (file)
@@ -100,71 +100,81 @@ struct tmp421_data {
        s16 temp[4];
 };
 
-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
 {
        /* Mask out status bits */
        int temp = reg & ~0xf;
 
-       return (temp * 1000 + 128) / 256;
-}
-
-static int temp_from_u16(u16 reg)
-{
-       /* Mask out status bits */
-       int temp = reg & ~0xf;
-
-       /* Add offset for extended temperature range. */
-       temp -= 64 * 256;
+       if (extended)
+               temp = temp - 64 * 256;
+       else
+               temp = (s16)temp;
 
-       return (temp * 1000 + 128) / 256;
+       return DIV_ROUND_CLOSEST(temp * 1000, 256);
 }
 
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
 {
-       struct tmp421_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
+       int ret = 0;
        int i;
 
        mutex_lock(&data->update_lock);
 
        if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
            !data->valid) {
-               data->config = i2c_smbus_read_byte_data(client,
-                       TMP421_CONFIG_REG_1);
+               ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+               if (ret < 0)
+                       goto exit;
+               data->config = ret;
 
                for (i = 0; i < data->channels; i++) {
-                       data->temp[i] = i2c_smbus_read_byte_data(client,
-                               TMP421_TEMP_MSB[i]) << 8;
-                       data->temp[i] |= i2c_smbus_read_byte_data(client,
-                               TMP421_TEMP_LSB[i]);
+                       ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+                       if (ret < 0)
+                               goto exit;
+                       data->temp[i] = ret << 8;
+
+                       ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+                       if (ret < 0)
+                               goto exit;
+                       data->temp[i] |= ret;
                }
                data->last_updated = jiffies;
                data->valid = 1;
        }
 
+exit:
        mutex_unlock(&data->update_lock);
 
-       return data;
+       if (ret < 0) {
+               data->valid = 0;
+               return ret;
+       }
+
+       return 0;
 }
 
 static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
                       u32 attr, int channel, long *val)
 {
-       struct tmp421_data *tmp421 = tmp421_update_device(dev);
+       struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+       int ret = 0;
+
+       ret = tmp421_update_device(tmp421);
+       if (ret)
+               return ret;
 
        switch (attr) {
        case hwmon_temp_input:
-               if (tmp421->config & TMP421_CONFIG_RANGE)
-                       *val = temp_from_u16(tmp421->temp[channel]);
-               else
-                       *val = temp_from_s16(tmp421->temp[channel]);
+               *val = temp_from_raw(tmp421->temp[channel],
+                                    tmp421->config & TMP421_CONFIG_RANGE);
                return 0;
        case hwmon_temp_fault:
                /*
-                * The OPEN bit signals a fault. This is bit 0 of the temperature
-                * register (low byte).
+                * Any of OPEN or /PVLD bits indicate a hardware mulfunction
+                * and the conversion result may be incorrect
                 */
-               *val = tmp421->temp[channel] & 0x01;
+               *val = !!(tmp421->temp[channel] & 0x03);
                return 0;
        default:
                return -EOPNOTSUPP;
@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
 {
        switch (attr) {
        case hwmon_temp_fault:
-               if (channel == 0)
-                       return 0;
-               return 0444;
        case hwmon_temp_input:
                return 0444;
        default:
index 37b25a1474c4633ba40bf4a5fd1d435983357519..3c1be2c11fdf0252daf6bc66fc751874227229b9 100644 (file)
@@ -273,9 +273,6 @@ struct w83791d_data {
        char valid;                     /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       /* array of 2 pointers to subclients */
-       struct i2c_client *lm75[2];
-
        /* volts */
        u8 in[NUMBER_OF_VIN];           /* Register value */
        u8 in_max[NUMBER_OF_VIN];       /* Register value */
@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
 static int w83791d_detect_subclients(struct i2c_client *client)
 {
        struct i2c_adapter *adapter = client->adapter;
-       struct w83791d_data *data = i2c_get_clientdata(client);
        int address = client->addr;
        int i, id;
        u8 val;
@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
        }
 
        val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
-       if (!(val & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + (val & 0x7));
-       if (!(val & 0x80)) {
-               if (!IS_ERR(data->lm75[0]) &&
-                               ((val & 0x7) == ((val >> 4) & 0x7))) {
-                       dev_err(&client->dev,
-                               "duplicate addresses 0x%x, "
-                               "use force_subclient\n",
-                               data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + ((val >> 4) & 0x7));
+
+       if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+               dev_err(&client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+               return -ENODEV;
        }
 
+       if (!(val & 0x08))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+       if (!(val & 0x80))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
        return 0;
 }
 
index abd5c3a722b91a4aa9c6a1ebb2d24a376d00b801..1f175f3813506ddd074650a09eb5970b32ddfbc7 100644 (file)
@@ -264,9 +264,6 @@ struct w83792d_data {
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       /* array of 2 pointers to subclients */
-       struct i2c_client *lm75[2];
-
        u8 in[9];               /* Register value */
        u8 in_max[9];           /* Register value */
        u8 in_min[9];           /* Register value */
@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
        int address = new_client->addr;
        u8 val;
        struct i2c_adapter *adapter = new_client->adapter;
-       struct w83792d_data *data = i2c_get_clientdata(new_client);
 
        id = i2c_adapter_id(adapter);
        if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
        }
 
        val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
-       if (!(val & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-                                                         0x48 + (val & 0x7));
-       if (!(val & 0x80)) {
-               if (!IS_ERR(data->lm75[0]) &&
-                       ((val & 0x7) == ((val >> 4) & 0x7))) {
-                       dev_err(&new_client->dev,
-                               "duplicate addresses 0x%x, use force_subclient\n",
-                               data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-                                                         0x48 + ((val >> 4) & 0x7));
+
+       if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+               dev_err(&new_client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+               return -ENODEV;
        }
 
+       if (!(val & 0x08))
+               devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+       if (!(val & 0x80))
+               devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
        return 0;
 }
 
index e7d0484eabe4c2a8b59470c69b8aac4def755c5a..1d2854de1cfc961b705516fc3f50b90865eac64e 100644 (file)
@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
 }
 
 struct w83793_data {
-       struct i2c_client *lm75[2];
        struct device *hwmon_dev;
        struct mutex update_lock;
        char valid;                     /* !=0 if following fields are valid */
@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
        int address = client->addr;
        u8 tmp;
        struct i2c_adapter *adapter = client->adapter;
-       struct w83793_data *data = i2c_get_clientdata(client);
 
        id = i2c_adapter_id(adapter);
        if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
        }
 
        tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
-       if (!(tmp & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + (tmp & 0x7));
-       if (!(tmp & 0x80)) {
-               if (!IS_ERR(data->lm75[0])
-                   && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
-                       dev_err(&client->dev,
-                               "duplicate addresses 0x%x, "
-                               "use force_subclients\n", data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + ((tmp >> 4) & 0x7));
+
+       if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+               dev_err(&client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+               return -ENODEV;
        }
 
+       if (!(tmp & 0x08))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+       if (!(tmp & 0x80))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
        return 0;
 }
 
index 4e0b7c2882cede7b7d2a30c3a77b1bcba18f378c..015e11c4663f3b2df0cb05764f1fd3c887b4ca46 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
                return err;
 
        /* Set frequency only if it is not 100KHz, which is default. */
-       switch ((data->reg & data->mask) >> data->bit) {
+       switch ((regval & data->mask) >> data->bit) {
        case MLXCPLD_I2C_FREQ_1000KHZ:
                freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
                break;
index 477480d1de6bd10054009114381429949465af1f..7d4b3eb7077ad62e13db6ca2eda1b60521723232 100644 (file)
@@ -41,6 +41,8 @@
 #define I2C_HANDSHAKE_RST              0x0020
 #define I2C_FIFO_ADDR_CLR              0x0001
 #define I2C_DELAY_LEN                  0x0002
+#define I2C_ST_START_CON               0x8001
+#define I2C_FS_START_CON               0x1800
 #define I2C_TIME_CLR_VALUE             0x0000
 #define I2C_TIME_DEFAULT_VALUE         0x0003
 #define I2C_WRRD_TRANAC_VALUE          0x0002
@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
        u16 intr_stat_reg;
+       u16 ext_conf_val;
 
        mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
        intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
        if (i2c->dev_comp->ltiming_adjust)
                mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
 
+       if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+               ext_conf_val = I2C_ST_START_CON;
+       else
+               ext_conf_val = I2C_FS_START_CON;
+
        if (i2c->dev_comp->timing_adjust) {
-               mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+               ext_conf_val = i2c->ac_timing.ext;
                mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
                               OFFSET_CLOCK_DIV);
                mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
                                       OFFSET_HS_STA_STO_AC_TIMING);
                }
        }
+       mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
 
        /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
        if (i2c->have_pmic)
index aaeeacc121215004c5d41d71e8f703035a4a1e34..546cc935e035a57dcb65c2896216a408f3a4a705 100644 (file)
@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
                        break;
 
                i2c_acpi_register_device(adapter, adev, &info);
+               put_device(&adapter->dev);
                break;
        case ACPI_RECONFIG_DEVICE_REMOVE:
                if (!acpi_device_enumerated(adev))
index 0019f1ea7df27248766bc0ee699a34a60a038af4..f41db9e0249a71be2ea4f9a9fd29cc33d6a474ae 100644 (file)
@@ -738,7 +738,7 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p)
 
        if (reg & FXLS8962AF_INT_STATUS_SRC_BUF) {
                ret = fxls8962af_fifo_flush(indio_dev);
-               if (ret)
+               if (ret < 0)
                        return IRQ_NONE;
 
                return IRQ_HANDLED;
index ee8ed9481025d2d26f192a5fd0450e3350799a29..2121a812b0c3148054b241da5e8c0da4c7fc4db0 100644 (file)
@@ -293,6 +293,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
        .has_registers = true,
        .addr_shift = 3,
        .read_mask = BIT(6),
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
index 42bb952f47388abc9ab34fe15db200b3258ba914..b6e8c8abf6f4cdac9d357c78630a9186b68f8cb8 100644 (file)
@@ -203,7 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
        .set_mode = ad7780_set_mode,
        .postprocess_sample = ad7780_postprocess_sample,
        .has_registers = false,
-       .irq_flags = IRQF_TRIGGER_LOW,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 #define _AD7780_CHANNEL(_bits, _wordsize, _mask_all)           \
index ef3e2d3ecb0c6d015a7c3456a3e6e5a6947f3551..0e7ab3fb072a961ecb6a0aaa60f36fa6110f4045 100644 (file)
@@ -206,7 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
        .has_registers = true,
        .addr_shift = 3,
        .read_mask = BIT(6),
-       .irq_flags = IRQF_TRIGGER_LOW,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
index 19efaa41bc344b6def9d03bc59efbe62c6e64d82..34ec0c28b2dff6b145445e1aabf5fc89c20ac363 100644 (file)
@@ -183,6 +183,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 
        data = iio_priv(indio_dev);
        data->dev = &pdev->dev;
+       platform_set_drvdata(pdev, indio_dev);
 
        data->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->base))
index 655ab02d03d8496bf73cabb24898ba7a1b8c37db..b753658bb41ec0f6d5ecb58f9955607cfaae4b58 100644 (file)
@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
                        .sign = 'u',                                    \
                        .realbits = depth,                              \
                        .storagebits = 16,                              \
-                       .shift = 2,                                     \
+                       .shift = (depth == 10) ? 2 : 0,                 \
                        .endianness = IIO_BE,                           \
                },                                                      \
        }
@@ -142,7 +142,6 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
        MAX1027_V_CHAN(11, depth)
 
 #define MAX1X31_CHANNELS(depth)                        \
-       MAX1X27_CHANNELS(depth),                \
        MAX1X29_CHANNELS(depth),                \
        MAX1027_V_CHAN(12, depth),              \
        MAX1027_V_CHAN(13, depth),              \
index 79c1dd68b9092c2449967f12837e0a3314114f70..d4fccd52ef08ba147a3fe3844ea2e31fa4f6fb0a 100644 (file)
@@ -82,6 +82,10 @@ static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
        MT6577_AUXADC_CHANNEL(15),
 };
 
+/* For Voltage calculation */
+#define VOLTAGE_FULL_RANGE  1500       /* VA voltage */
+#define AUXADC_PRECISE      4096       /* 12 bits */
+
 static int mt_auxadc_get_cali_data(int rawdata, bool enable_cali)
 {
        return rawdata;
@@ -191,6 +195,10 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
                }
                if (adc_dev->dev_comp->sample_data_cali)
                        *val = mt_auxadc_get_cali_data(*val, true);
+
+               /* Convert adc raw data to voltage: 0 - 1500 mV */
+               *val = *val * VOLTAGE_FULL_RANGE / AUXADC_PRECISE;
+
                return IIO_VAL_INT;
 
        default:
index 9996d5eef2893a4ad3152738064c5531db9b56be..32fbf57c362fa58733df8a576ac6e3481dffe6d0 100644 (file)
@@ -401,7 +401,7 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
 exit_hw_init:
        clk_disable_unprepare(adc->pclk);
 
-       return 0;
+       return ret;
 }
 
 static void rzg2l_adc_pm_runtime_disable(void *data)
@@ -570,8 +570,10 @@ static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
                return ret;
 
        ret = clk_prepare_enable(adc->adclk);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(adc->pclk);
                return ret;
+       }
 
        rzg2l_adc_pwr(adc, true);
 
index 3143f35a6509aa5b92b6aece0fa1de3ab71118d7..83c1ae07b3e9a1c7db56921f14c91c0f2c40c9d8 100644 (file)
@@ -171,7 +171,13 @@ static int adc128_probe(struct spi_device *spi)
        mutex_init(&adc->lock);
 
        ret = iio_device_register(indio_dev);
+       if (ret)
+               goto err_disable_regulator;
 
+       return 0;
+
+err_disable_regulator:
+       regulator_disable(adc->reg);
        return ret;
 }
 
index 4864c38b8d1c2d6f66c89fbe7a1407ebf9f4073f..769bd9280524a0d016eac827b200253844453c80 100644 (file)
@@ -137,7 +137,7 @@ static int ssp_print_mcu_debug(char *data_frame, int *data_index,
        if (length > received_len - *data_index || length <= 0) {
                ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n",
                        length, received_len);
-               return length ? length : -EPROTO;
+               return -EPROTO;
        }
 
        ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]);
@@ -273,6 +273,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
        for (idx = 0; idx < len;) {
                switch (dataframe[idx++]) {
                case SSP_MSG2AP_INST_BYPASS_DATA:
+                       if (idx >= len)
+                               return -EPROTO;
                        sd = dataframe[idx++];
                        if (sd < 0 || sd >= SSP_SENSOR_MAX) {
                                dev_err(SSP_DEV,
@@ -282,10 +284,13 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 
                        if (indio_devs[sd]) {
                                spd = iio_priv(indio_devs[sd]);
-                               if (spd->process_data)
+                               if (spd->process_data) {
+                                       if (idx >= len)
+                                               return -EPROTO;
                                        spd->process_data(indio_devs[sd],
                                                          &dataframe[idx],
                                                          data->timestamp);
+                               }
                        } else {
                                dev_err(SSP_DEV, "no client for frame\n");
                        }
@@ -293,6 +298,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
                        idx += ssp_offset_map[sd];
                        break;
                case SSP_MSG2AP_INST_DEBUG_DATA:
+                       if (idx >= len)
+                               return -EPROTO;
                        sd = ssp_print_mcu_debug(dataframe, &idx, len);
                        if (sd) {
                                dev_err(SSP_DEV,
index 2a5ba1b08a1d0af6c6aea10ea016a93a95381781..546a4cf6c5ef8325937b248b883f1663f6ae8a0d 100644 (file)
@@ -350,6 +350,7 @@ static int dac5571_probe(struct i2c_client *client,
                data->dac5571_pwrdwn = dac5571_pwrdwn_quad;
                break;
        default:
+               ret = -EINVAL;
                goto err;
        }
 
index eb48102f94243e41c49b4f4e66864d027fa0dfc3..287fff39a927aacf6a043627c4e0cb78f0ba850a 100644 (file)
@@ -353,10 +353,11 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
        if (dec > st->info->max_dec)
                dec = st->info->max_dec;
 
-       ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+       ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
        if (ret)
                goto error;
 
+       adis_dev_unlock(&st->adis);
        /*
         * If decimation is used, then gyro and accel data will have meaningful
         * bits on the LSB registers. This info is used on the trigger handler.
index a869a6e52a16bec9eca5e196bd6c2ccef85d8ed7..ed129321a14da1175bb29cfae062a90879056e2c 100644 (file)
@@ -144,6 +144,7 @@ struct adis16480_chip_info {
        unsigned int max_dec_rate;
        const unsigned int *filter_freqs;
        bool has_pps_clk_mode;
+       bool has_sleep_cnt;
        const struct adis_data adis_data;
 };
 
@@ -939,6 +940,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
        },
@@ -952,6 +954,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
        },
@@ -965,6 +968,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
        },
@@ -978,6 +982,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
        },
@@ -1425,9 +1430,12 @@ static int adis16480_probe(struct spi_device *spi)
        if (ret)
                return ret;
 
-       ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, indio_dev);
-       if (ret)
-               return ret;
+       if (st->chip_info->has_sleep_cnt) {
+               ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
+                                              indio_dev);
+               if (ret)
+                       return ret;
+       }
 
        ret = adis16480_config_irq_pin(spi->dev.of_node, st);
        if (ret)
index 52963da401a78e9bdbba5bc186455312a1c9c882..1880bd5bb2586c5b9f05b61fa1dda532bc7156d0 100644 (file)
@@ -276,6 +276,8 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
                ret = wait_event_timeout(opt->result_ready_queue,
                                opt->result_ready,
                                msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+               if (ret == 0)
+                       return -ETIMEDOUT;
        } else {
                /* Sleep for result ready time */
                timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
@@ -312,9 +314,7 @@ err:
                /* Disallow IRQ to access the device while lock is active */
                opt->ok_to_ignore_lock = false;
 
-       if (ret == 0)
-               return -ETIMEDOUT;
-       else if (ret < 0)
+       if (ret < 0)
                return ret;
 
        if (opt->use_irq) {
index f1099b4953014c628002341426eb949f441fb332..467519a2027e5519ead92730ab91c95473254f9b 100644 (file)
@@ -5,3 +5,4 @@
 
 # Keep in alphabetical order
 obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index c40791baced58836ba6bd5711e565c1e7ffedca1..704ce595542c5735e2390ebedc2cf2697333b8e5 100644 (file)
@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
        }
 }
 
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
 {
        struct rdma_id_private *dev_id_priv;
 
+       lockdep_assert_held(&lock);
+
        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
-       mutex_lock(&lock);
        list_del(&id_priv->list);
 
        while (!list_empty(&id_priv->listen_list)) {
@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+       mutex_lock(&lock);
+       _cma_cancel_listens(id_priv);
        mutex_unlock(&lock);
 }
 
@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 {
        switch (state) {
        case RDMA_CM_ADDR_QUERY:
+               /*
+                * We can avoid doing the rdma_addr_cancel() based on state,
+                * only RDMA_CM_ADDR_QUERY has a work that could still execute.
+                * Notice that the addr_handler work could still be exiting
+                * outside this state, however due to the interaction with the
+                * handler_mutex the work is guaranteed not to touch id_priv
+                * during exit.
+                */
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case RDMA_CM_ROUTE_QUERY:
@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
 static void destroy_mc(struct rdma_id_private *id_priv,
                       struct cma_multicast *mc)
 {
+       bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
        if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
                ib_sa_free_multicast(mc->sa_mc);
 
@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
 
                        cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
                                     &mgid);
-                       cma_igmp_send(ndev, &mgid, false);
+
+                       if (!send_only)
+                               cma_igmp_send(ndev, &mgid, false);
+
                        dev_put(ndev);
                }
 
@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
        return 0;
 
 err_listen:
-       list_del(&id_priv->list);
+       _cma_cancel_listens(id_priv);
        mutex_unlock(&lock);
        if (to_destroy)
                rdma_destroy_id(&to_destroy->id);
@@ -3413,6 +3433,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                if (dst_addr->sa_family == AF_IB) {
                        ret = cma_resolve_ib_addr(id_priv);
                } else {
+                       /*
+                        * The FSM can return back to RDMA_CM_ADDR_BOUND after
+                        * rdma_resolve_ip() is called, eg through the error
+                        * path in addr_handler(). If this happens the existing
+                        * request must be canceled before issuing a new one.
+                        * Since canceling a request is a bit slow and this
+                        * oddball path is rare, keep track once a request has
+                        * been issued. The track turns out to be a permanent
+                        * state since this is the only cancel as it is
+                        * immediately before rdma_resolve_ip().
+                        */
+                       if (id_priv->used_resolve_ip)
+                               rdma_addr_cancel(&id->route.addr.dev_addr);
+                       else
+                               id_priv->used_resolve_ip = 1;
                        ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
                                              &id->route.addr.dev_addr,
                                              timeout_ms, addr_handler,
@@ -3771,9 +3806,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        int ret;
 
        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+               struct sockaddr_in any_in = {
+                       .sin_family = AF_INET,
+                       .sin_addr.s_addr = htonl(INADDR_ANY),
+               };
+
                /* For a well behaved ULP state will be RDMA_CM_IDLE */
-               id->route.addr.src_addr.ss_family = AF_INET;
-               ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+               ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
                if (ret)
                        return ret;
                if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
index 5c463da9984536c6795ee7905fb08848235b4cb9..f92f101ea9818f8c1ecf11375353103dce234858 100644 (file)
@@ -91,6 +91,7 @@ struct rdma_id_private {
        u8                      afonly;
        u8                      timeout;
        u8                      min_rnr_timer;
+       u8 used_resolve_ip;
        enum ib_gid_type        gid_type;
 
        /*
index a20b8108e160c5f25c4fb9cd27dd0a0a216d0b5d..c00f8e28aab7540cd994cb2f6d88c9cb046070ba 100644 (file)
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
        /* Construct the family header first */
        header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-       memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-              LS_DEVICE_NAME_MAX);
+       strscpy_pad(header->device_name,
+                   dev_name(&query->port->agent->device->dev),
+                   LS_DEVICE_NAME_MAX);
        header->port_num = query->port->port_num;
 
        if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
index e74ddbe465891df1ccf42ba0db1d39ff829e27d7..15b0cb0f363f4244acc7b47be79794445187e55f 100644 (file)
@@ -876,14 +876,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
        struct hfi1_ipoib_txq *txq = &priv->txqs[q];
        u64 completed = atomic64_read(&txq->complete_txreqs);
 
-       dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
-                   (unsigned long long)txq, q,
+       dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+                   txq, q,
                    __netif_subqueue_stopped(dev, txq->q_idx),
                    atomic_read(&txq->stops),
                    atomic_read(&txq->no_desc),
                    atomic_read(&txq->ring_full));
-       dd_dev_info(priv->dd, "sde %llx engine %u\n",
-                   (unsigned long long)txq->sde,
+       dd_dev_info(priv->dd, "sde %p engine %u\n",
+                   txq->sde,
                    txq->sde ? txq->sde->this_idx : 0);
        dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
        dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
index 489b436f19bb3df07b361a0487feb43e7e05792d..3d42bd2b36bd431788cfd11e43d212d6e76a3871 100644 (file)
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
        u64 reg;
        struct pio_buf *pbuf;
+       LIST_HEAD(wake_list);
 
        if (!sc)
                return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
        spin_unlock(&sc->release_lock);
 
        write_seqlock(&sc->waitlock);
-       while (!list_empty(&sc->piowait)) {
+       if (!list_empty(&sc->piowait))
+               list_move(&sc->piowait, &wake_list);
+       write_sequnlock(&sc->waitlock);
+       while (!list_empty(&wake_list)) {
                struct iowait *wait;
                struct rvt_qp *qp;
                struct hfi1_qp_priv *priv;
 
-               wait = list_first_entry(&sc->piowait, struct iowait, list);
+               wait = list_first_entry(&wake_list, struct iowait, list);
                qp = iowait_to_qp(wait);
                priv = qp->priv;
                list_del_init(&priv->s_iowait.list);
                priv->s_iowait.lock = NULL;
                hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
        }
-       write_sequnlock(&sc->waitlock);
 
        spin_unlock_irq(&sc->alloc_lock);
 }
index 1e9c3c5bee684485526fd16bd077a9676029d0e7..d763f097599ff8f18a2750fc15464582f475886a 100644 (file)
@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
        INIT_LIST_HEAD(&hr_cq->rq_list);
 }
 
-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
-                        struct hns_roce_ib_create_cq *ucmd)
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+                       struct hns_roce_ib_create_cq *ucmd)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
 
-       if (udata) {
-               if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
-                       hr_cq->cqe_size = ucmd->cqe_size;
-               else
-                       hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
-       } else {
+       if (!udata) {
                hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+               return 0;
+       }
+
+       if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+               if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+                   ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+                       ibdev_err(&hr_dev->ib_dev,
+                                 "invalid cqe size %u.\n", ucmd->cqe_size);
+                       return -EINVAL;
+               }
+
+               hr_cq->cqe_size = ucmd->cqe_size;
+       } else {
+               hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
        }
+
+       return 0;
 }
 
 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 
        set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
 
-       set_cqe_size(hr_cq, udata, &ucmd);
+       ret = set_cqe_size(hr_cq, udata, &ucmd);
+       if (ret)
+               return ret;
 
        ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
        if (ret) {
index 5b9953105752c397ac2cacdce4e7eaaf8a993387..d5f3faa1627a41c08224e0fb6fe9c31d8a1e9c13 100644 (file)
@@ -3299,7 +3299,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
                        dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
                                          hr_cq->ib_cq.cqe);
                        owner_bit = hr_reg_read(dest, CQE_OWNER);
-                       memcpy(dest, cqe, sizeof(*cqe));
+                       memcpy(dest, cqe, hr_cq->cqe_size);
                        hr_reg_write(dest, CQE_OWNER, owner_bit);
                }
        }
@@ -4397,7 +4397,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        hr_qp->path_mtu = ib_mtu;
 
        mtu = ib_mtu_enum_to_int(ib_mtu);
-       if (WARN_ON(mtu < 0))
+       if (WARN_ON(mtu <= 0))
+               return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+       if (WARN_ON(lp_pktn_ini >= 0xF))
                return -EINVAL;
 
        if (attr_mask & IB_QP_PATH_MTU) {
@@ -4405,10 +4410,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                hr_reg_clear(qpc_mask, QPC_MTU);
        }
 
-#define MAX_LP_MSG_LEN 65536
-       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
-       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
        hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
        hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
 
index 6b62299abfbbbbb49d9539ec872f104b59e51adb..6dea0a49d1718384d662df8071c4fec98791a475 100644 (file)
@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
             original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
             last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
             last_ae == IRDMA_AE_BAD_CLOSE ||
-            last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+            last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
                issue_close = 1;
                iwqp->cm_id = NULL;
                qp->term_flags = 0;
@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
                                       teardown_entry);
                attr.qp_state = IB_QPS_ERR;
                irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
-               if (iwdev->reset)
+               if (iwdev->rf->reset)
                        irdma_cm_disconn(cm_node->iwqp);
                irdma_rem_ref_cm_node(cm_node);
        }
index 00de5ee9a260950a987625df112165fbf98b70cb..7de525a5ccf8c9a6d72f5c92d6fe05098325bb9b 100644 (file)
@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
                qp->flush_code = FLUSH_GENERAL_ERR;
                break;
+       case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+               qp->flush_code = FLUSH_RETRY_EXC_ERR;
+               break;
+       case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+       case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+       case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+               qp->flush_code = FLUSH_MW_BIND_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
 
        irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
        if (irdma_initialize_ieq(iwdev)) {
-               iwdev->reset = true;
+               iwdev->rf->reset = true;
                rf->gen_ops.request_reset(rf);
        }
 }
@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
        case IEQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
-                                            iwdev->reset);
+                                            iwdev->rf->reset);
                fallthrough;
        case ILQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi,
                                             IRDMA_PUDA_RSRC_TYPE_ILQ,
-                                            iwdev->reset);
+                                            iwdev->rf->reset);
                break;
        default:
                ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
index bddf88194d095ea377dbecfcbf7fa4701fb512d5..d219f64b2c3d5fbd8b98f8c40ce267d773ef6a9e 100644 (file)
@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
 
        iwdev = to_iwdev(ibdev);
        if (reset)
-               iwdev->reset = true;
+               iwdev->rf->reset = true;
 
        iwdev->iw_status = 0;
        irdma_port_ibevent(iwdev);
index 743d9e143a999f253a48b1e6f210d8ee0ccbebba..b678fe712447e1bf0126574f12eacee6716c2d34 100644 (file)
@@ -346,7 +346,6 @@ struct irdma_device {
        bool roce_mode:1;
        bool roce_dcqcn_en:1;
        bool dcb:1;
-       bool reset:1;
        bool iw_ooo:1;
        enum init_completion_state init_state;
 
index 5fb92de1f015aa996c0780969775f2ce88507813..9b544a3b128867361084884386abff8ef15c905b 100644 (file)
@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
                if (cq->avoid_mem_cflct) {
                        ext_cqe = (__le64 *)((u8 *)cqe + 32);
                        get_64bit_val(ext_cqe, 24, &qword7);
-                       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+                       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                } else {
                        peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
                        ext_cqe = cq->cq_base[peek_head].buf;
                        get_64bit_val(ext_cqe, 24, &qword7);
-                       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+                       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                        if (!peek_head)
                                polarity ^= 1;
                }
index ff705f323233375a5ea150adb8daa2797e1f224f..3dcbb1fbf2c665585b060bb27af6f7d1099ff292 100644 (file)
@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
        FLUSH_REM_OP_ERR,
        FLUSH_LOC_LEN_ERR,
        FLUSH_FATAL_ERR,
+       FLUSH_RETRY_EXC_ERR,
+       FLUSH_MW_BIND_ERR,
 };
 
 enum irdma_cmpl_status {
index e94470991fe0eb25a0f338ea9a18080c2671f6bb..ac91ea5296db99af4c3e10f3de81cf902ae7ff03 100644 (file)
@@ -2507,7 +2507,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
        struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
        struct ib_qp_attr attr;
 
-       if (qp->iwdev->reset)
+       if (qp->iwdev->rf->reset)
                return;
        attr.qp_state = IB_QPS_ERR;
 
index 4fc32340207361c0de003fe0c1f59dad01ff2ca2..102dc9342f2a211e4f108b70222dfb9eaeffd8fa 100644 (file)
@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        irdma_qp_rem_ref(&iwqp->ibqp);
        wait_for_completion(&iwqp->free_qp);
        irdma_free_lsmm_rsrc(iwqp);
-       if (!iwdev->reset)
-               irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
 
        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
@@ -2035,7 +2034,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
                /* Kmode allocations */
                int rsize;
 
-               if (entries > rf->max_cqe) {
+               if (entries < 1 || entries > rf->max_cqe) {
                        err_code = -EINVAL;
                        goto cq_free_rsrc;
                }
@@ -3353,6 +3352,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
                return IB_WC_LOC_LEN_ERR;
        case FLUSH_GENERAL_ERR:
                return IB_WC_WR_FLUSH_ERR;
+       case FLUSH_RETRY_EXC_ERR:
+               return IB_WC_RETRY_EXC_ERR;
+       case FLUSH_MW_BIND_ERR:
+               return IB_WC_MW_BIND_ERR;
        case FLUSH_FATAL_ERR:
        default:
                return IB_WC_FATAL_ERR;
@@ -3396,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
                }
 
                if (cq_poll_info->ud_vlan_valid) {
-                       entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-                       entry->wc_flags |= IB_WC_WITH_VLAN;
+                       u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
                        entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+                       if (vlan) {
+                               entry->vlan_id = vlan;
+                               entry->wc_flags |= IB_WC_WITH_VLAN;
+                       }
                } else {
                        entry->sl = 0;
                }
index b68c575eb78e7c618096b4021fa014e76a57ded8..b0d6ee0739f534d60e91a259fb55bd6ab1fc0d9c 100644 (file)
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 
                tc_node->enable = true;
                ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
-               if (ret)
+               if (ret) {
+                       vsi->unregister_qset(vsi, tc_node);
                        goto reg_err;
+               }
        }
        ibdev_dbg(to_ibdev(vsi->dev),
                  "WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
        }
        goto exit;
 
+reg_err:
+       irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+       list_del(&tc_node->siblings);
+       irdma_free_node(vsi, tc_node);
 leaf_add_err:
        if (list_empty(&vsi_node->child_list_head)) {
                if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
 exit:
        mutex_unlock(&vsi->dev->ws_mutex);
        return ret;
-
-reg_err:
-       mutex_unlock(&vsi->dev->ws_mutex);
-       irdma_ws_remove(vsi, user_pri);
-       return ret;
 }
 
 /**
index 3be36ebbf67ae0a626e83556daadae4759335515..22e2f4d79743d198c780475381bc16f8bf1c4a54 100644 (file)
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
                goto err_2;
        }
        mr->mmkey.type = MLX5_MKEY_MR;
-       mr->desc_size = sizeof(struct mlx5_mtt);
        mr->umem = umem;
        set_mr_fields(dev, mr, umem->length, access_flags);
        kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
                ib_umem_release(&odp->umem);
                return ERR_CAST(mr);
        }
+       xa_init(&mr->implicit_children);
 
        odp->private = mr;
        err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
index b2fca110346c1740fe9a5912165ffe4270e667b0..e5abbcfc1d5748abf6716c46fce1b8fb466b42db 100644 (file)
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
                MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
                MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+               if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+                       MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
 
                err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
                                           MLX5_ST_SZ_BYTES(create_dct_in), out,
index 3cb4febaad0fa7b35b769b3f05fccf61e0df11ff..8def88cfa30091eac2f8f609459d57dcc0f224a2 100644 (file)
@@ -455,6 +455,7 @@ struct qedr_qp {
        /* synchronization objects used with iwarp ep */
        struct kref refcnt;
        struct completion iwarp_cm_comp;
+       struct completion qp_rel_comp;
        unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
 };
 
index 1715fbe0719d85f27e1e7287435caa93b845858e..a51fc68549844de87c199d23f89c113c7fae1409 100644 (file)
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
 {
        struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
 
-       kfree(qp);
+       complete(&qp->qp_rel_comp);
 }
 
 static void
index 3fbf172dbbef41fa6c7c74debd17d6b517a09c41..dcb3653db72d7b6fad25bca546ea81ed6ae9a61c 100644 (file)
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                kref_init(&qp->refcnt);
                init_completion(&qp->iwarp_cm_comp);
+               init_completion(&qp->qp_rel_comp);
        }
 
        qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
        qedr_free_qp_resources(dev, qp, udata);
 
-       if (rdma_protocol_iwarp(&dev->ibdev, 1))
+       if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                qedr_iw_qp_rem_ref(&qp->ibqp);
+               wait_for_completion(&qp->qp_rel_comp);
+       }
 
        return 0;
 }
index 452e2355d24eeb28ca1628ebb3bd4bda9d00ed74..0a3b28142c05b6b5d76910f310e2cdf79159ac5a 100644 (file)
@@ -403,7 +403,7 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
 }
 
 #define QIB_DIAGC_ATTR(N)                                                      \
-       static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL);    \
+       static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64));  \
        static struct qib_diagc_attr qib_diagc_attr_##N = {                    \
                .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store),    \
                .counter =                                                     \
index a67599b5a550afd794f1e54c8f02cefdb3c8e35c..ac11943a5ddb0f28f83f08b93a0fdae6b7c6a75a 100644 (file)
@@ -602,7 +602,7 @@ done:
 /*
  * How many pages in this iovec element?
  */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
 {
        const unsigned long addr  = (unsigned long) iov->iov_base;
        const unsigned long  len  = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_queue *pq,
                                   struct qib_user_sdma_pkt *pkt,
-                                  unsigned long addr, int tlen, int npages)
+                                  unsigned long addr, int tlen, size_t npages)
 {
        struct page *pages[8];
        int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
        unsigned long idx;
 
        for (idx = 0; idx < niov; idx++) {
-               const int npages = qib_user_sdma_num_pages(iov + idx);
+               const size_t npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
 
                ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
-               int npages = 0;
-               int bytes_togo = 0;
+               size_t npages = 0;
+               size_t bytes_togo = 0;
                int tiddma = 0;
                int cfur;
 
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
                        npages += qib_user_sdma_num_pages(&iov[idx]);
 
-                       bytes_togo += slen;
+                       if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+                           bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+                               ret = -EINVAL;
+                               goto free_pbc;
+                       }
                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                }
 
                if (frag_size) {
-                       int tidsmsize, n;
-                       size_t pktsize;
+                       size_t tidsmsize, n, pktsize, sz, addrlimit;
 
                        n = npages*((2*PAGE_SIZE/frag_size)+1);
                        pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                        else
                                tidsmsize = 0;
 
-                       pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+                       if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+                               ret = -EINVAL;
+                               goto free_pbc;
+                       }
+                       pkt = kmalloc(sz, GFP_KERNEL);
                        if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
                        pkt->largepkt = 1;
                        pkt->frag_size = frag_size;
-                       pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+                       if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+                                              &addrlimit) ||
+                           addrlimit > type_max(typeof(pkt->addrlimit))) {
+                               ret = -EINVAL;
+                               goto free_pbc;
+                       }
+                       pkt->addrlimit = addrlimit;
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
index 84dd682d23341beb46c89211cfa3e4385ada5488..b350081aeb5a323d06a64221194ca5ab903248c9 100644 (file)
@@ -90,7 +90,7 @@ struct usnic_ib_dev {
 
 struct usnic_ib_vf {
        struct usnic_ib_dev             *pf;
-       spinlock_t                      lock;
+       struct mutex                    lock;
        struct usnic_vnic               *vnic;
        unsigned int                    qp_grp_ref_cnt;
        struct usnic_ib_pd              *pd;
index 228e9a36dad0d8e17465a141b43e8bb1019bdd3a..d346dd48e731b130d135d9218aa18295aebd52df 100644 (file)
@@ -572,7 +572,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
        }
 
        vf->pf = pf;
-       spin_lock_init(&vf->lock);
+       mutex_init(&vf->lock);
        mutex_lock(&pf->usdev_lock);
        list_add_tail(&vf->link, &pf->vf_dev_list);
        /*
index 06a4e9d4545da0cd38e1ec146352da0ef8803f08..756a83bcff58afbec6bc43f878a4a9a96b1aab98 100644 (file)
@@ -196,7 +196,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
                        vf = dev_get_drvdata(dev);
-                       spin_lock(&vf->lock);
+                       mutex_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
                                usnic_dbg("Found used vnic %s from %s\n",
@@ -208,10 +208,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                                                             vf, pd, res_spec,
                                                             trans_spec);
 
-                               spin_unlock(&vf->lock);
+                               mutex_unlock(&vf->lock);
                                goto qp_grp_check;
                        }
-                       spin_unlock(&vf->lock);
+                       mutex_unlock(&vf->lock);
 
                }
                usnic_uiom_free_dev_list(dev_list);
@@ -220,7 +220,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
 
        /* Try to find resources on an unused vf */
        list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
-               spin_lock(&vf->lock);
+               mutex_lock(&vf->lock);
                vnic = vf->vnic;
                if (vf->qp_grp_ref_cnt == 0 &&
                    usnic_vnic_check_room(vnic, res_spec) == 0) {
@@ -228,10 +228,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                                                     vf, pd, res_spec,
                                                     trans_spec);
 
-                       spin_unlock(&vf->lock);
+                       mutex_unlock(&vf->lock);
                        goto qp_grp_check;
                }
-               spin_unlock(&vf->lock);
+               mutex_unlock(&vf->lock);
        }
 
        usnic_info("No free qp grp found on %s\n",
@@ -253,9 +253,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
 
        WARN_ON(qp_grp->state != IB_QPS_RESET);
 
-       spin_lock(&vf->lock);
+       mutex_lock(&vf->lock);
        usnic_ib_qp_grp_destroy(qp_grp);
-       spin_unlock(&vf->lock);
+       mutex_unlock(&vf->lock);
 }
 
 static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
index 49bdd78ac664329aaa7e2eab7b9295fdcb651bd3..3305f2744bfaaddf990979028e9659b680791e81 100644 (file)
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
-               ret = ENOMEM;
+               ret = -ENOMEM;
                goto bail_ip;
        }
 
index 29de8412e41655f7fae6e32884180eda090f4a27..4c914f75a9027db503db69d9a04443059c8b5712 100644 (file)
@@ -334,6 +334,7 @@ static const struct xpad_device {
        { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
        { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
        { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+       { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
        { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -451,6 +452,7 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOXONE_VENDOR(0x24c6),            /* PowerA Controllers */
        XPAD_XBOXONE_VENDOR(0x2e24),            /* Hyperkin Duke X-Box One pad */
        XPAD_XBOX360_VENDOR(0x2f24),            /* GameSir Controllers */
+       XPAD_XBOX360_VENDOR(0x3285),            /* Nacon GC-100 */
        { }
 };
 
index 2f5e3ab5ed638c9f7d0691f40528b766cf794134..65286762b02ab918403d91de894e849820010ec1 100644 (file)
@@ -3,6 +3,7 @@
 // Driver for the IMX SNVS ON/OFF Power Key
 // Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
 
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static void imx_snvs_pwrkey_disable_clk(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
 static void imx_snvs_pwrkey_act(void *pdata)
 {
        struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
        struct pwrkey_drv_data *pdata;
        struct input_dev *input;
        struct device_node *np;
+       struct clk *clk;
        int error;
        u32 vid;
 
@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
        }
 
+       clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
+               return PTR_ERR(clk);
+       }
+
+       error = clk_prepare_enable(clk);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
+                       ERR_PTR(error));
+               return error;
+       }
+
+       error = devm_add_action_or_reset(&pdev->dev,
+                                        imx_snvs_pwrkey_disable_clk, clk);
+       if (error) {
+               dev_err(&pdev->dev,
+                       "Failed to register clock cleanup handler (%pe)\n",
+                       ERR_PTR(error));
+               return error;
+       }
+
        pdata->wakeup = of_property_read_bool(np, "wakeup-source");
 
        pdata->irq = platform_get_irq(pdev, 0);
index dd18cb917c4df7f1241f32e1f546cbd3113ed239..4620e20d0190f7bb0e5ab197cca0765dcd8ad821 100644 (file)
@@ -80,27 +80,27 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
 
        data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
                                                input_abs_get_min(input, axis_x),
-                                               &minimum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-size-x",
-                                               input_abs_get_max(input,
-                                                                 axis_x) + 1,
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
-                                               input_abs_get_fuzz(input, axis_x),
-                                               &fuzz);
+                                               &minimum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+                                                input_abs_get_max(input,
+                                                                  axis_x) + 1,
+                                                &maximum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+                                                input_abs_get_fuzz(input, axis_x),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz);
 
        data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
                                                input_abs_get_min(input, axis_y),
-                                               &minimum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-size-y",
-                                               input_abs_get_max(input,
-                                                                 axis_y) + 1,
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
-                                               input_abs_get_fuzz(input, axis_y),
-                                               &fuzz);
+                                               &minimum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+                                                input_abs_get_max(input,
+                                                                  axis_y) + 1,
+                                                &maximum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+                                                input_abs_get_fuzz(input, axis_y),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz);
 
@@ -108,11 +108,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
        data_present = touchscreen_get_prop_u32(dev,
                                                "touchscreen-max-pressure",
                                                input_abs_get_max(input, axis),
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev,
-                                               "touchscreen-fuzz-pressure",
-                                               input_abs_get_fuzz(input, axis),
-                                               &fuzz);
+                                               &maximum);
+       data_present |= touchscreen_get_prop_u32(dev,
+                                                "touchscreen-fuzz-pressure",
+                                                input_abs_get_fuzz(input, axis),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis, 0, maximum, fuzz);
 
index 744544a723b7748510605329e24910bf42ecc275..6f754a8d30b11776381b3fe78427c09226b98385 100644 (file)
@@ -71,19 +71,22 @@ static int grts_cb(const void *data, void *private)
                unsigned int z2 = touch_info[st->ch_map[GRTS_CH_Z2]];
                unsigned int Rt;
 
-               Rt = z2;
-               Rt -= z1;
-               Rt *= st->x_plate_ohms;
-               Rt = DIV_ROUND_CLOSEST(Rt, 16);
-               Rt *= x;
-               Rt /= z1;
-               Rt = DIV_ROUND_CLOSEST(Rt, 256);
-               /*
-                * On increased pressure the resistance (Rt) is decreasing
-                * so, convert values to make it looks as real pressure.
-                */
-               if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
-                       press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+               if (likely(x && z1)) {
+                       Rt = z2;
+                       Rt -= z1;
+                       Rt *= st->x_plate_ohms;
+                       Rt = DIV_ROUND_CLOSEST(Rt, 16);
+                       Rt *= x;
+                       Rt /= z1;
+                       Rt = DIV_ROUND_CLOSEST(Rt, 256);
+                       /*
+                        * On increased pressure the resistance (Rt) is
+                        * decreasing so, convert values to make it looks as
+                        * real pressure.
+                        */
+                       if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
+                               press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+               }
        }
 
        if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
index 632dbdd2191500e62efc0ff500e0ac488f07d953..fb23a5b780a46e2f5c999b19585472dc12589b8f 100644 (file)
@@ -44,9 +44,9 @@
 #define NOC_PERM_MODE_BYPASS           (1 << NOC_QOS_MODE_BYPASS)
 
 #define NOC_QOS_PRIORITYn_ADDR(n)      (0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_MASK          0xf
+#define NOC_QOS_PRIORITY_P1_MASK       0xc
+#define NOC_QOS_PRIORITY_P0_MASK       0x3
 #define NOC_QOS_PRIORITY_P1_SHIFT      0x2
-#define NOC_QOS_PRIORITY_P0_SHIFT      0x3
 
 #define NOC_QOS_MODEn_ADDR(n)          (0xc + (n * 0x1000))
 #define NOC_QOS_MODEn_MASK             0x3
@@ -173,6 +173,16 @@ static const struct clk_bulk_data bus_mm_clocks[] = {
        { .id = "iface" },
 };
 
+static const struct clk_bulk_data bus_a2noc_clocks[] = {
+       { .id = "bus" },
+       { .id = "bus_a" },
+       { .id = "ipa" },
+       { .id = "ufs_axi" },
+       { .id = "aggre2_ufs_axi" },
+       { .id = "aggre2_usb3_axi" },
+       { .id = "cfg_noc_usb2_axi" },
+};
+
 /**
  * struct qcom_icc_provider - Qualcomm specific interconnect provider
  * @provider: generic interconnect provider
@@ -307,7 +317,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
 DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
 DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
@@ -624,13 +634,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
        /* Must be updated one at a time, P1 first, P0 last */
        val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
        rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-                               NOC_QOS_PRIORITY_MASK, val);
+                               NOC_QOS_PRIORITY_P1_MASK, val);
        if (rc)
                return rc;
 
-       val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
        return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-                                 NOC_QOS_PRIORITY_MASK, val);
+                                 NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
 }
 
 static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
@@ -810,6 +819,10 @@ static int qnoc_probe(struct platform_device *pdev)
                qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
                                            sizeof(bus_mm_clocks), GFP_KERNEL);
                qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+       } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) {
+               qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks,
+                                           sizeof(bus_a2noc_clocks), GFP_KERNEL);
+               qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks);
        } else {
                if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
                        qp->is_bimc_node = true;
index 124c41adeca1875438cffdb9588c9473d86487f6..3eb68fa1b8cc02949fc29468316df560ceaefc31 100644 (file)
@@ -308,7 +308,6 @@ config APPLE_DART
 config ARM_SMMU
        tristate "ARM Ltd. System MMU (SMMU) Support"
        depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU if ARM
@@ -356,6 +355,14 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
          'arm-smmu.disable_bypass' will continue to override this
          config.
 
+config ARM_SMMU_QCOM
+       def_tristate y
+       depends on ARM_SMMU && ARCH_QCOM
+       select QCOM_SCM
+       help
+         When running on a Qualcomm platform that has the custom variant
+         of the ARM SMMU, this needs to be built into the SMMU driver.
+
 config ARM_SMMU_V3
        tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
        depends on ARM64
@@ -438,7 +445,7 @@ config QCOM_IOMMU
        # Note: iommu drivers cannot (yet?) be built as modules
        bool "Qualcomm IOMMU Support"
        depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM=y
+       select QCOM_SCM
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU
index 559db9259e65c76ebf1eef2e02dd2c61be2de929..fdfa39ec2a4d4a50d87d5fa9502efc7b1170bc61 100644 (file)
@@ -183,7 +183,6 @@ struct apple_dart_master_cfg {
 
 static struct platform_driver apple_dart_driver;
 static const struct iommu_ops apple_dart_iommu_ops;
-static const struct iommu_flush_ops apple_dart_tlb_ops;
 
 static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
 {
@@ -338,22 +337,6 @@ static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
        apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_tlb_flush_all(void *cookie)
-{
-       apple_dart_domain_flush_tlb(cookie);
-}
-
-static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
-                                     size_t granule, void *cookie)
-{
-       apple_dart_domain_flush_tlb(cookie);
-}
-
-static const struct iommu_flush_ops apple_dart_tlb_ops = {
-       .tlb_flush_all = apple_dart_tlb_flush_all,
-       .tlb_flush_walk = apple_dart_tlb_flush_walk,
-};
-
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
@@ -435,7 +418,6 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
                .ias = 32,
                .oas = 36,
                .coherent_walk = 1,
-               .tlb = &apple_dart_tlb_ops,
                .iommu_dev = dart->dev,
        };
 
@@ -661,16 +643,34 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
        return -EINVAL;
 }
 
+static DEFINE_MUTEX(apple_dart_groups_lock);
+
+static void apple_dart_release_group(void *iommu_data)
+{
+       int i, sid;
+       struct apple_dart_stream_map *stream_map;
+       struct apple_dart_master_cfg *group_master_cfg = iommu_data;
+
+       mutex_lock(&apple_dart_groups_lock);
+
+       for_each_stream_map(i, group_master_cfg, stream_map)
+               for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+                       stream_map->dart->sid2group[sid] = NULL;
+
+       kfree(iommu_data);
+       mutex_unlock(&apple_dart_groups_lock);
+}
+
 static struct iommu_group *apple_dart_device_group(struct device *dev)
 {
-       static DEFINE_MUTEX(lock);
        int i, sid;
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
        struct apple_dart_stream_map *stream_map;
+       struct apple_dart_master_cfg *group_master_cfg;
        struct iommu_group *group = NULL;
        struct iommu_group *res = ERR_PTR(-EINVAL);
 
-       mutex_lock(&lock);
+       mutex_lock(&apple_dart_groups_lock);
 
        for_each_stream_map(i, cfg, stream_map) {
                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
@@ -698,6 +698,20 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
 #endif
                group = generic_device_group(dev);
 
+       res = ERR_PTR(-ENOMEM);
+       if (!group)
+               goto out;
+
+       group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+       if (!group_master_cfg) {
+               iommu_group_put(group);
+               goto out;
+       }
+
+       memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+       iommu_group_set_iommudata(group, group_master_cfg,
+               apple_dart_release_group);
+
        for_each_stream_map(i, cfg, stream_map)
                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
                        stream_map->dart->sid2group[sid] = group;
@@ -705,7 +719,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
        res = group;
 
 out:
-       mutex_unlock(&lock);
+       mutex_unlock(&apple_dart_groups_lock);
        return res;
 }
 
index e240a7bcf3107903689a57833c45c0c9e1e38738..b0cc01aa20c98ec2511ce981403f1ddddb2c77ce 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
 obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
index 9f465e146799faa653eef2e26b764465ea4265c0..2c25cce3806032486c4957c2bdca6e03839fe870 100644 (file)
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
            of_device_is_compatible(np, "nvidia,tegra186-smmu"))
                return nvidia_smmu_impl_init(smmu);
 
-       smmu = qcom_smmu_impl_init(smmu);
+       if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+               smmu = qcom_smmu_impl_init(smmu);
 
        if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
                smmu->impl = &mrvl_mmu500_impl;
index 0ec5514c99807f5f46d6c94b8af5ba30fb0e75f3..b7708b93f3fa18819c956d81cf625aeaad1824b5 100644 (file)
@@ -1942,18 +1942,18 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
        reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
        if (fault_type == INTR_REMAP)
-               pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else if (pasid == INVALID_IOASID)
-               pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
                       type ? "DMA Read" : "DMA Write",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr,
                       fault_reason, reason);
        else
-               pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
                       type ? "DMA Read" : "DMA Write", pasid,
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr,
index c14e65a5d38f013d3e129a17304883800cc307a2..c709861198e593ecb5f359772eeb1bb44add21ea 100644 (file)
@@ -33,6 +33,7 @@ struct ipoctal_channel {
        unsigned int                    pointer_read;
        unsigned int                    pointer_write;
        struct tty_port                 tty_port;
+       bool                            tty_registered;
        union scc2698_channel __iomem   *regs;
        union scc2698_block __iomem     *block_regs;
        unsigned int                    board_id;
@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
        return 0;
 }
 
-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
 {
        struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
        struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
-       int err;
-
-       tty->driver_data = channel;
+       int res;
 
        if (!ipack_get_carrier(ipoctal->dev))
                return -EBUSY;
 
-       err = tty_port_open(&channel->tty_port, tty, file);
-       if (err)
-               ipack_put_carrier(ipoctal->dev);
+       res = tty_standard_install(driver, tty);
+       if (res)
+               goto err_put_carrier;
+
+       tty->driver_data = channel;
+
+       return 0;
+
+err_put_carrier:
+       ipack_put_carrier(ipoctal->dev);
+
+       return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+       struct ipoctal_channel *channel = tty->driver_data;
 
-       return err;
+       return tty_port_open(&channel->tty_port, tty, file);
 }
 
 static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        int res;
        int i;
        struct tty_driver *tty;
-       char name[20];
        struct ipoctal_channel *channel;
        struct ipack_region *region;
        void __iomem *addr;
@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        /* Fill struct tty_driver with ipoctal data */
        tty->owner = THIS_MODULE;
        tty->driver_name = KBUILD_MODNAME;
-       sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
-       tty->name = name;
+       tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+       if (!tty->name) {
+               res = -ENOMEM;
+               goto err_put_driver;
+       }
        tty->major = 0;
 
        tty->minor_start = 0;
@@ -371,8 +386,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        res = tty_register_driver(tty);
        if (res) {
                dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
-               tty_driver_kref_put(tty);
-               return res;
+               goto err_free_name;
        }
 
        /* Save struct tty_driver for use it when uninstalling the device */
@@ -383,7 +397,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 
                channel = &ipoctal->channel[i];
                tty_port_init(&channel->tty_port);
-               tty_port_alloc_xmit_buf(&channel->tty_port);
+               res = tty_port_alloc_xmit_buf(&channel->tty_port);
+               if (res)
+                       continue;
                channel->tty_port.ops = &ipoctal_tty_port_ops;
 
                ipoctal_reset_stats(&channel->stats);
@@ -391,13 +407,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                spin_lock_init(&channel->lock);
                channel->pointer_read = 0;
                channel->pointer_write = 0;
-               tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+               tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+                                                       i, NULL, channel, NULL);
                if (IS_ERR(tty_dev)) {
                        dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+                       tty_port_free_xmit_buf(&channel->tty_port);
                        tty_port_destroy(&channel->tty_port);
                        continue;
                }
-               dev_set_drvdata(tty_dev, channel);
+               channel->tty_registered = true;
        }
 
        /*
@@ -409,6 +427,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                                       ipoctal_irq_handler, ipoctal);
 
        return 0;
+
+err_free_name:
+       kfree(tty->name);
+err_put_driver:
+       tty_driver_kref_put(tty);
+
+       return res;
 }
 
 static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -648,6 +673,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
 
 static const struct tty_operations ipoctal_fops = {
        .ioctl =                NULL,
+       .install =              ipoctal_install,
        .open =                 ipoctal_open,
        .close =                ipoctal_close,
        .write =                ipoctal_write_tty,
@@ -690,12 +716,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
 
        for (i = 0; i < NR_CHANNELS; i++) {
                struct ipoctal_channel *channel = &ipoctal->channel[i];
+
+               if (!channel->tty_registered)
+                       continue;
+
                tty_unregister_device(ipoctal->tty_drv, i);
                tty_port_free_xmit_buf(&channel->tty_port);
                tty_port_destroy(&channel->tty_port);
        }
 
        tty_unregister_driver(ipoctal->tty_drv);
+       kfree(ipoctal->tty_drv->name);
        tty_driver_kref_put(ipoctal->tty_drv);
        kfree(ipoctal);
 }
index cb0afe8971623668b232173e38dc7aad63d887c4..7313454e403a630d64196a07bfc805891753f00f 100644 (file)
@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
 
        ctr_down(ctr, CAPI_CTR_DETACHED);
 
+       if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
+               err = -EINVAL;
+               goto unlock_out;
+       }
+
        if (capi_controller[ctr->cnr - 1] != ctr) {
                err = -EINVAL;
                goto unlock_out;
index e501cb03f211dee02a0f32be36a7017edfaef970..bd087cca1c1d2d0aca56d7479598d0c2f50c228b 100644 (file)
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
        pci_set_master(hc->pdev);
        if (!hc->irq) {
                printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
-               return 1;
+               return -EINVAL;
        }
        hc->hw.pci_io =
                (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
 
        if (!hc->hw.pci_io) {
                printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
-               return 1;
+               return -ENOMEM;
        }
        /* Allocate memory for FIFOS */
        /* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
        if (!buffer) {
                printk(KERN_WARNING
                       "HFC-PCI: Error allocating memory for FIFO!\n");
-               return 1;
+               return -ENOMEM;
        }
        hc->hw.fifos = buffer;
        pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
                       "HFC-PCI: Error in ioremap for PCI!\n");
                dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
                                  hc->hw.dmahandle);
-               return 1;
+               return -ENOMEM;
        }
 
        printk(KERN_INFO
index 2a1ddd47a0968da8db09c9800b5219547ca5e43f..a52f275f826348475b0f49a241ee73b2c6553fca 100644 (file)
@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
                nj_disable_hwirq(card);
                mode_tiger(&card->bc[0], ISDN_P_NONE);
                mode_tiger(&card->bc[1], ISDN_P_NONE);
-               card->isac.release(&card->isac);
                spin_unlock_irqrestore(&card->lock, flags);
+               card->isac.release(&card->isac);
                release_region(card->base, card->base_s);
                card->base_s = 0;
        }
index 84dbe08ad20536b264566ba8ddb133beb441897c..edd22e4d65dffd41d097e8f96d3659303c13425a 100644 (file)
@@ -161,7 +161,7 @@ static const char *clone_device_name(struct clone *clone)
 
 static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
 {
-       const char *descs[] = {
+       static const char * const descs[] = {
                "read-write",
                "read-only",
                "fail"
index 5b95eea517d1b95ed543df0afcd83792c088b086..a896dea9750e434655b9ebef4363da84b59be91e 100644 (file)
@@ -490,6 +490,14 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;
 
+       /*
+        * blk-mq's unquiesce may come from outside events, such as
+        * elevator switch, updating nr_requests or others, and request may
+        * come during suspend, so simply ask for blk-mq to requeue it.
+        */
+       if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
+               return BLK_STS_RESOURCE;
+
        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);
index 22a5ac82446a6c3ce16ccd360e3ab42ceb9a2bed..88288c8d6bc8c0dddcda66d52688382219bd8f78 100644 (file)
@@ -475,6 +475,7 @@ static int verity_verify_io(struct dm_verity_io *io)
        struct bvec_iter start;
        unsigned b;
        struct crypto_wait wait;
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        for (b = 0; b < io->n_blocks; b++) {
                int r;
@@ -529,9 +530,17 @@ static int verity_verify_io(struct dm_verity_io *io)
                else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
                                           cur_block, NULL, &start) == 0)
                        continue;
-               else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-                                          cur_block))
-                       return -EIO;
+               else {
+                       if (bio->bi_status) {
+                               /*
+                                * Error correction failed; Just return error
+                                */
+                               return -EIO;
+                       }
+                       if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+                                             cur_block))
+                               return -EIO;
+               }
        }
 
        return 0;
index a011d09cb0faca9f2acd6b1c3208169724ae3f73..76d9da49fda7563bf45a834243c46f4c691104af 100644 (file)
@@ -496,18 +496,17 @@ static void start_io_acct(struct dm_io *io)
                                    false, 0, &io->stats_aux);
 }
 
-static void end_io_acct(struct dm_io *io)
+static void end_io_acct(struct mapped_device *md, struct bio *bio,
+                       unsigned long start_time, struct dm_stats_aux *stats_aux)
 {
-       struct mapped_device *md = io->md;
-       struct bio *bio = io->orig_bio;
-       unsigned long duration = jiffies - io->start_time;
+       unsigned long duration = jiffies - start_time;
 
-       bio_end_io_acct(bio, io->start_time);
+       bio_end_io_acct(bio, start_time);
 
        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
-                                   true, duration, &io->stats_aux);
+                                   true, duration, stats_aux);
 
        /* nudge anyone waiting on suspend queue */
        if (unlikely(wq_has_sleeper(&md->wait)))
@@ -790,6 +789,8 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
        blk_status_t io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;
+       unsigned long start_time = 0;
+       struct dm_stats_aux stats_aux;
 
        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
@@ -821,8 +822,10 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
                }
 
                io_error = io->status;
-               end_io_acct(io);
+               start_time = io->start_time;
+               stats_aux = io->stats_aux;
                free_io(md, io);
+               end_io_acct(md, bio, start_time, &stats_aux);
 
                if (io_error == BLK_STS_DM_REQUEUE)
                        return;
index 157c924686e4b61b4869aa57e95ee88a1563cc70..80321e03809aca4995a8db0428e54e6f1a2c4009 100644 (file)
@@ -565,7 +565,7 @@ config VIDEO_QCOM_VENUS
        depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select QCOM_MDT_LOADER if ARCH_QCOM
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index d402e456f27df676b767281ecd4cf7254d978a1d..7d0ab19c38bb91458ddcc430c686a0b239b03465 100644 (file)
@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        continue;
                length = 0;
                switch (c) {
-               /* SOF0: baseline JPEG */
-               case SOF0:
+               /* JPEG_MARKER_SOF0: baseline JPEG */
+               case JPEG_MARKER_SOF0:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        notfound = 0;
                        break;
 
-               case DQT:
+               case JPEG_MARKER_DQT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case DHT:
+               case JPEG_MARKER_DHT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case SOS:
+               case JPEG_MARKER_SOS:
                        sos = jpeg_buffer.curr - 2; /* 0xffda */
                        break;
 
                /* skip payload-less markers */
-               case RST ... RST + 7:
-               case SOI:
-               case EOI:
-               case TEM:
+               case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+               case JPEG_MARKER_SOI:
+               case JPEG_MARKER_EOI:
+               case JPEG_MARKER_TEM:
                        break;
 
                /* skip uninteresting payload markers */
index a77d93c098ce74d08cb5f2254b83fb3c845a01d7..8473a019bb5f276624af2540129d5a2016b2f900 100644 (file)
 #define EXYNOS3250_IRQ_TIMEOUT         0x10000000
 
 /* a selection of JPEG markers */
-#define TEM                            0x01
-#define SOF0                           0xc0
-#define DHT                            0xc4
-#define RST                            0xd0
-#define SOI                            0xd8
-#define EOI                            0xd9
-#define        SOS                             0xda
-#define DQT                            0xdb
-#define DHP                            0xde
+#define JPEG_MARKER_TEM                                0x01
+#define JPEG_MARKER_SOF0                               0xc0
+#define JPEG_MARKER_DHT                                0xc4
+#define JPEG_MARKER_RST                                0xd0
+#define JPEG_MARKER_SOI                                0xd8
+#define JPEG_MARKER_EOI                                0xd9
+#define        JPEG_MARKER_SOS                         0xda
+#define JPEG_MARKER_DQT                                0xdb
+#define JPEG_MARKER_DHP                                0xde
 
 /* Flags that indicate a format can be used for capture/output */
 #define SJPEG_FMT_FLAG_ENC_CAPTURE     (1 << 0)
@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
  * @fmt:       driver-specific format of this queue
  * @w:         image width
  * @h:         image height
- * @sos:       SOS marker's position relative to the buffer beginning
- * @dht:       DHT markers' positions relative to the buffer beginning
- * @dqt:       DQT markers' positions relative to the buffer beginning
- * @sof:       SOF0 marker's position relative to the buffer beginning
- * @sof_len:   SOF0 marker's payload length (without length field itself)
+ * @sos:       JPEG_MARKER_SOS's position relative to the buffer beginning
+ * @dht:       JPEG_MARKER_DHT' positions relative to the buffer beginning
+ * @dqt:       JPEG_MARKER_DQT' positions relative to the buffer beginning
+ * @sof:       JPEG_MARKER_SOF0's position relative to the buffer beginning
+ * @sof_len:   JPEG_MARKER_SOF0's payload length (without length field itself)
  * @size:      image buffer size in bytes
  */
 struct s5p_jpeg_q_data {
index 3e729a17b35ff9dab6651c150d2230d35f4c62df..48d52baec1a1c981f2cd0122d8fe03911c7373cd 100644 (file)
@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
 // End transmit and repeat reset command so we exit sump mode
 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
 static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
 
 #define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
                buf[i] = cpu_to_be16(v);
        }
 
-       buf[count] = cpu_to_be16(0xffff);
+       buf[count] = 0xffff;
 
        irtoy->tx_buf = buf;
        irtoy->tx_len = size;
        irtoy->emitted = 0;
 
+       // There is an issue where if the unit is receiving IR while the
+       // first TXSTART command is sent, the device might end up hanging
+       // with its led on. It does not respond to any command when this
+       // happens. To work around this, re-enter sample mode.
+       err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+                           sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+       if (err) {
+               dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+               return err;
+       }
+
+       err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+                           sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+       if (err) {
+               dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+               return err;
+       }
+
        err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
                            STATE_TX);
        kfree(buf);
index 85ba901bc11b0ad0f57217e756c1ab0f3f2b4985..0f5a49fc7c9e0e1029086883415fcb8dd3007d56 100644 (file)
@@ -224,6 +224,7 @@ config HI6421V600_IRQ
        tristate "HiSilicon Hi6421v600 IRQ and powerkey"
        depends on OF
        depends on SPMI
+       depends on HAS_IOMEM
        select MFD_CORE
        select REGMAP_SPMI
        help
index e5a4ed3701eb8e3d858eee4026ea5a299068542a..a798fad5f03c28f707814cc56c8a8042f0f3079b 100644 (file)
@@ -47,7 +47,7 @@ static inline bool needs_unaligned_copy(const void *ptr)
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        return false;
 #else
-       return ((ptr - NULL) & 3) != 0;
+       return ((uintptr_t)ptr & 3) != 0;
 #endif
 }
 
index 4d09b672ac3c8a6e70b34f2802ff7c0de7bb8da6..632325474233a11f0916047ed42338a9eb342d76 100644 (file)
@@ -366,6 +366,13 @@ static const struct of_device_id at25_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, at25_of_match);
 
+static const struct spi_device_id at25_spi_ids[] = {
+       { .name = "at25",},
+       { .name = "fm25",},
+       { }
+};
+MODULE_DEVICE_TABLE(spi, at25_spi_ids);
+
 static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
@@ -491,6 +498,7 @@ static struct spi_driver at25_driver = {
                .dev_groups     = sernum_groups,
        },
        .probe          = at25_probe,
+       .id_table       = at25_spi_ids,
 };
 
 module_spi_driver(at25_driver);
index 29d8971ec558bff437e4bea69af3ca7d58ae3f83..1f15399e5cb49199f5f4b467f8ff8ef8fb6771f8 100644 (file)
@@ -406,6 +406,23 @@ static const struct of_device_id eeprom_93xx46_of_table[] = {
 };
 MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
 
+static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
+       { .name = "eeprom-93xx46",
+         .driver_data = (kernel_ulong_t)&at93c46_data, },
+       { .name = "at93c46",
+         .driver_data = (kernel_ulong_t)&at93c46_data, },
+       { .name = "at93c46d",
+         .driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
+       { .name = "at93c56",
+         .driver_data = (kernel_ulong_t)&at93c56_data, },
+       { .name = "at93c66",
+         .driver_data = (kernel_ulong_t)&at93c66_data, },
+       { .name = "93lc46b",
+         .driver_data = (kernel_ulong_t)&microchip_93lc46b_data, },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
+
 static int eeprom_93xx46_probe_dt(struct spi_device *spi)
 {
        const struct of_device_id *of_id =
@@ -555,6 +572,7 @@ static struct spi_driver eeprom_93xx46_driver = {
        },
        .probe          = eeprom_93xx46_probe,
        .remove         = eeprom_93xx46_remove,
+       .id_table       = eeprom_93xx46_spi_ids,
 };
 
 module_spi_driver(eeprom_93xx46_driver);
index beda610e6b30d139d2ccf53d4fed655ee3952f4b..ad6ced4546556a6a2db25152e89c8eec45b05cd6 100644 (file)
@@ -814,10 +814,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
                        rpra[i].pv = (u64) ctx->args[i].ptr;
                        pages[i].addr = ctx->maps[i]->phys;
 
+                       mmap_read_lock(current->mm);
                        vma = find_vma(current->mm, ctx->args[i].ptr);
                        if (vma)
                                pages[i].addr += ctx->args[i].ptr -
                                                 vma->vm_start;
+                       mmap_read_unlock(current->mm);
 
                        pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
index 02f33bc60c56e4692291387985d9b913ad2e9250..4c9c5394da6fa62bb3d0fb29f6e54d695314ba32 100644 (file)
@@ -539,6 +539,7 @@ static int gehc_achc_probe(struct spi_device *spi)
 
 static const struct spi_device_id gehc_achc_id[] = {
        { "ge,achc", 0 },
+       { "achc", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(spi, gehc_achc_id);
index 91b57544f7c6657750c4f0175b36e23fe2959eec..6dafff375f1c6e614010552e79f5b52292cfb1af 100644 (file)
@@ -2649,11 +2649,18 @@ put_ctx:
 free_seq_arr:
        kfree(cs_seq_arr);
 
-       /* update output args */
-       memset(args, 0, sizeof(*args));
        if (rc)
                return rc;
 
+       if (mcs_data.wait_status == -ERESTARTSYS) {
+               dev_err_ratelimited(hdev->dev,
+                               "user process got signal while waiting for Multi-CS\n");
+               return -EINTR;
+       }
+
+       /* update output args */
+       memset(args, 0, sizeof(*args));
+
        if (mcs_data.completion_bitmap) {
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
                args->out.cs_completion_map = mcs_data.completion_bitmap;
@@ -2667,8 +2674,6 @@ free_seq_arr:
                /* update if some CS was gone */
                if (mcs_data.timestamp)
                        args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
-       } else if (mcs_data.wait_status == -ERESTARTSYS) {
-               args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
        } else {
                args->out.status = HL_WAIT_CS_STATUS_BUSY;
        }
@@ -2688,16 +2693,17 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
        rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
                                &status, &timestamp);
 
+       if (rc == -ERESTARTSYS) {
+               dev_err_ratelimited(hdev->dev,
+                       "user process got signal while waiting for CS handle %llu\n",
+                       seq);
+               return -EINTR;
+       }
+
        memset(args, 0, sizeof(*args));
 
        if (rc) {
-               if (rc == -ERESTARTSYS) {
-                       dev_err_ratelimited(hdev->dev,
-                               "user process got signal while waiting for CS handle %llu\n",
-                               seq);
-                       args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
-                       rc = -EINTR;
-               } else if (rc == -ETIMEDOUT) {
+               if (rc == -ETIMEDOUT) {
                        dev_err_ratelimited(hdev->dev,
                                "CS %llu has timed-out while user process is waiting for it\n",
                                seq);
@@ -2823,7 +2829,6 @@ wait_again:
                dev_err_ratelimited(hdev->dev,
                        "user process got signal while waiting for interrupt ID %d\n",
                        interrupt->interrupt_id);
-               *status = HL_WAIT_CS_STATUS_INTERRUPTED;
                rc = -EINTR;
        } else {
                *status = CS_WAIT_STATUS_BUSY;
@@ -2878,8 +2883,6 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                                args->in.interrupt_timeout_us, args->in.addr,
                                args->in.target, interrupt_offset, &status);
 
-       memset(args, 0, sizeof(*args));
-
        if (rc) {
                if (rc != -EINTR)
                        dev_err_ratelimited(hdev->dev,
@@ -2888,6 +2891,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                return rc;
        }
 
+       memset(args, 0, sizeof(*args));
+
        switch (status) {
        case CS_WAIT_STATUS_COMPLETED:
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
index 99b5c1ecc444194e378c1666f242aabb0430ba5d..be41843df75bcb029dc34af8c13c0d3e0c905443 100644 (file)
@@ -1298,7 +1298,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_STARTING) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1381,7 +1382,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_DR_SETUP) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1448,7 +1450,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1490,7 +1493,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
                                return 0;
                        }
index cb34925e10f15d6cd65b79b48c4870257b9e6189..67bb6a25fd0a020c55c310fcd171b1fc566982f9 100644 (file)
@@ -92,6 +92,7 @@
 #define MEI_DEV_ID_CDF        0x18D3  /* Cedar Fork */
 
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
+#define MEI_DEV_ID_ICP_N      0x38E0  /* Ice Lake Point N */
 
 #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
 
index c3393b383e5989bf6ed3be487ba219a8b7fcaeb2..3a45aaf002ac8523e3955e23e21603acfffeb280 100644 (file)
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
index 71313961cc54db98ba2ed90ecd939070ae625bd6..95b3511b056052c07dfc11cabc79ce8afcee716d 100644 (file)
@@ -547,7 +547,7 @@ config MMC_SDHCI_MSM
        depends on MMC_SDHCI_PLTFM
        select MMC_SDHCI_IO_ACCESSORS
        select MMC_CQHCI
-       select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+       select QCOM_SCM if MMC_CRYPTO
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in Qualcomm SOCs. The controller supports
index 6578cc64ae9e80caadf8d44ef18ba2ac7d33e891..380f9aa56eb26a76877526c8f61cbd16eb785f2a 100644 (file)
@@ -1802,10 +1802,15 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
 
        spin_lock_irqsave(&host->irq_lock, flags);
 
-       if (!host->data_status)
+       /*
+        * Only inject an error if we haven't already got an error or data over
+        * interrupt.
+        */
+       if (!host->data_status) {
                host->data_status = SDMMC_INT_DCRC;
-       set_bit(EVENT_DATA_ERROR, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
+               set_bit(EVENT_DATA_ERROR, &host->pending_events);
+               tasklet_schedule(&host->tasklet);
+       }
 
        spin_unlock_irqrestore(&host->irq_lock, flags);
 
@@ -2721,12 +2726,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                }
 
                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+                       spin_lock(&host->irq_lock);
+
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb(); /* drain writebuffer */
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
+
+                       spin_unlock(&host->irq_lock);
                }
 
                if (pending & SDMMC_INT_DATA_OVER) {
index 3f28eb4d17fe79a9c13f37ff0d5cf5afb11a4db0..8f36536cb1b6d0d33242e9287104851e95b92bf0 100644 (file)
@@ -746,7 +746,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
        writel(start, host->regs + SD_EMMC_START);
 }
 
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
 static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
                                  size_t buflen, bool to_buffer)
 {
@@ -764,21 +764,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
        sg_miter_start(&miter, sgl, nents, sg_flags);
 
        while ((offset < buflen) && sg_miter_next(&miter)) {
-               unsigned int len;
+               unsigned int buf_offset = 0;
+               unsigned int len, left;
+               u32 *buf = miter.addr;
 
                len = min(miter.length, buflen - offset);
+               left = len;
 
-               /* When dram_access_quirk, the bounce buffer is a iomem mapping */
-               if (host->dram_access_quirk) {
-                       if (to_buffer)
-                               memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
-                       else
-                               memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+               if (to_buffer) {
+                       do {
+                               writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                } else {
-                       if (to_buffer)
-                               memcpy(host->bounce_buf + offset, miter.addr, len);
-                       else
-                               memcpy(miter.addr, host->bounce_buf + offset, len);
+                       do {
+                               *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                }
 
                offset += len;
@@ -830,7 +836,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
                if (data->flags & MMC_DATA_WRITE) {
                        cmd_cfg |= CMD_CFG_DATA_WR;
                        WARN_ON(xfer_bytes > host->bounce_buf_size);
-                       meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       if (host->dram_access_quirk)
+                               meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       else
+                               sg_copy_to_buffer(data->sg, data->sg_len,
+                                                 host->bounce_buf, xfer_bytes);
                        dma_wmb();
                }
 
@@ -849,12 +859,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
        writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
 }
 
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+       struct scatterlist *sg;
+       int i;
+
+       /* Reject request if any element offset or size is not 32bit aligned */
+       for_each_sg(data->sg, sg, data->sg_len, i) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+                   !IS_ALIGNED(sg->length, sizeof(u32))) {
+                       dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+                               data->sg->offset, data->sg->length);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct meson_host *host = mmc_priv(mmc);
        bool needs_pre_post_req = mrq->data &&
                        !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
+       /*
+        * The memory at the end of the controller used as bounce buffer for
+        * the dram_access_quirk only accepts 32bit read/write access,
+        * check the aligment and length of the data before starting the request.
+        */
+       if (host->dram_access_quirk && mrq->data) {
+               mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+               if (mrq->cmd->error) {
+                       mmc_request_done(mmc, mrq);
+                       return;
+               }
+       }
+
        if (needs_pre_post_req) {
                meson_mmc_get_transfer_mode(mmc, mrq);
                if (!meson_mmc_desc_chain_mode(mrq->data))
@@ -999,7 +1040,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
        if (meson_mmc_bounce_buf_read(data)) {
                xfer_bytes = data->blksz * data->blocks;
                WARN_ON(xfer_bytes > host->bounce_buf_size);
-               meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               if (host->dram_access_quirk)
+                       meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               else
+                       sg_copy_from_buffer(data->sg, data->sg_len,
+                                           host->bounce_buf, xfer_bytes);
        }
 
        next_cmd = meson_mmc_get_next_command(cmd);
index 6fc4cf3c9dce101cd97b9a1718ae19bdb000ccec..a4407f391f66a6f2f05bebad1334748895c9fe77 100644 (file)
@@ -561,6 +561,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
                /* Unknown why but without polling reset status, it will hang */
                read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
                                  false, priv->rstc);
+               /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+               sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
                priv->needs_adjust_hs400 = false;
                renesas_sdhi_set_clock(host, host->clk_cache);
        } else if (priv->scc_ctl) {
index 5564d7b23e7cd9861fc90ab0c720ddb06f6a65ef..d1a1c548c515f07a4cebce87dd0637177f39197f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/slot-gpio.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
        u16 clk;
-       unsigned long timeout;
 
        host->mmc->actual_clock = 0;
 
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
        /* Wait max 20 ms */
-       timeout = 20;
-       while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-               & SDHCI_CLOCK_INT_STABLE)) {
-               if (timeout == 0) {
-                       pr_err("%s: Internal clock never stabilised.\n",
-                              mmc_hostname(host->mmc));
-                       return;
-               }
-               timeout--;
-               mdelay(1);
+       if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+                             1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+               pr_err("%s: Internal clock never stabilised.\n",
+                      mmc_hostname(host->mmc));
+               return;
        }
 
        clk |= SDHCI_CLOCK_CARD_EN;
@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+       unsigned int tmp;
 
        sdhci_reset(host, mask);
 
@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 
                sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
                             SDMMC_CALCR);
+
+               if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+                                     10, 20000, false, host, SDMMC_CALCR))
+                       dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
        }
 }
 
index ef0badea4f4158e9b136fa2274a87f7006ff20a1..04e6f7b26706482883fb3fe681200febc23fc92e 100644 (file)
@@ -1676,13 +1676,17 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int data_size1, data_size2, oob_size1, oob_size2;
        int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+       int raw_cw = cw;
 
        nand_read_page_op(chip, page, 0, NULL, 0);
        host->use_ecc = false;
 
+       if (nandc->props->qpic_v2)
+               raw_cw = ecc->steps - 1;
+
        clear_bam_transaction(nandc);
        set_address(host, host->cw_size * cw, page);
-       update_rw_regs(host, 1, true, cw);
+       update_rw_regs(host, 1, true, raw_cw);
        config_nand_page_read(chip);
 
        data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
@@ -1711,7 +1715,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
                nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
        }
 
-       config_nand_cw_read(chip, false, cw);
+       config_nand_cw_read(chip, false, raw_cw);
 
        read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
        reg_off += data_size1;
index 308d4f2fff00139c6a3be11b73bd515a0b11fb41..eee47bad0592007c6bce70def5e015d2fab4cdaf 100644 (file)
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
        struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+       void __iomem *src = priv->mram_base + offset;
 
-       ioread32_rep(priv->mram_base + offset, val, val_count);
+       while (val_count--) {
+               *(unsigned int *)val = ioread32(src);
+               val += 4;
+               src += 4;
+       }
 
        return 0;
 }
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
                            const void *val, size_t val_count)
 {
        struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+       void __iomem *dst = priv->mram_base + offset;
 
-       iowrite32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               iowrite32(*(unsigned int *)val, dst);
+               val += 4;
+               dst += 4;
+       }
 
        return 0;
 }
index 00e4533c8bddcdb89606aa8d588e89965e2a86af..8999ec9455ec22bc486f3905d19c1dbe6858335f 100644 (file)
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
        struct rcar_can_priv *priv = netdev_priv(ndev);
        u16 ctlr;
 
-       if (netif_running(ndev)) {
-               netif_stop_queue(ndev);
-               netif_device_detach(ndev);
-       }
+       if (!netif_running(ndev))
+               return 0;
+
+       netif_stop_queue(ndev);
+       netif_device_detach(ndev);
+
        ctlr = readw(&priv->regs->ctlr);
        ctlr |= RCAR_CAN_CTLR_CANM_HALT;
        writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
        u16 ctlr;
        int err;
 
+       if (!netif_running(ndev))
+               return 0;
+
        err = clk_enable(priv->clk);
        if (err) {
                netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
        writew(ctlr, &priv->regs->ctlr);
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       if (netif_running(ndev)) {
-               netif_device_attach(ndev);
-               netif_start_queue(ndev);
-       }
+       netif_device_attach(ndev);
+       netif_start_queue(ndev);
+
        return 0;
 }
 
index 6db90dc4bc9dd8144c2ce8aeab9c477ca3802b2e..84f34020aafb9ca1dcc1214e0cd615e6d1f904ab 100644 (file)
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
                struct net_device *prev_dev = chan->prev_dev;
 
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
+               /* do that only for first channel */
+               if (!prev_dev && chan->pciec_card)
+                       peak_pciec_remove(chan->pciec_card);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
                dev = prev_dev;
 
-               if (!dev) {
-                       /* do that only for first channel */
-                       if (chan->pciec_card)
-                               peak_pciec_remove(chan->pciec_card);
+               if (!dev)
                        break;
-               }
                priv = netdev_priv(dev);
                chan = priv->priv;
        }
index b11eabad575bb5003bc35ca6dce0af40d64a6fe7..09029a3bad1ac3e8df64ae8bfd04f0475707d8fd 100644 (file)
@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
        } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
                new_state = CAN_STATE_ERROR_WARNING;
        } else {
-               /* no error bit (so, no error skb, back to active state) */
-               dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               /* back to (or still in) ERROR_ACTIVE state */
+               new_state = CAN_STATE_ERROR_ACTIVE;
                pdev->bec.txerr = 0;
                pdev->bec.rxerr = 0;
-               return 0;
        }
 
        /* state hasn't changed */
@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 
        /* allocate an skb to store the error frame */
        skb = alloc_can_err_skb(netdev, &cf);
-       if (skb)
-               can_change_state(netdev, cf, tx_state, rx_state);
+       can_change_state(netdev, cf, tx_state, rx_state);
 
        /* things must be done even in case of OOM */
        if (new_state == CAN_STATE_BUS_OFF)
index 3ff4b7e177f3cb0d11ec59324df4e2222764dab0..dbd4486a173ff205c9f5ae1f02dbb092b56333f3 100644 (file)
 #define GSWIP_SDMA_PCTRLp(p)           (0xBC0 + ((p) * 0x6))
 #define  GSWIP_SDMA_PCTRL_EN           BIT(0)  /* SDMA Port Enable */
 #define  GSWIP_SDMA_PCTRL_FCEN         BIT(1)  /* Flow Control Enable */
-#define  GSWIP_SDMA_PCTRL_PAUFWD       BIT(1)  /* Pause Frame Forwarding */
+#define  GSWIP_SDMA_PCTRL_PAUFWD       BIT(3)  /* Pause Frame Forwarding */
 
 #define GSWIP_TABLE_ACTIVE_VLAN                0x01
 #define GSWIP_TABLE_VLAN_MAPPING       0x02
index 1542bfb8b5e54a595ef822a679d6725c8f7eb85e..7c2968a639eba552e552bb418fc3cbeabe8a9dea 100644 (file)
@@ -449,8 +449,10 @@ EXPORT_SYMBOL(ksz_switch_register);
 void ksz_switch_remove(struct ksz_device *dev)
 {
        /* timer started */
-       if (dev->mib_read_interval)
+       if (dev->mib_read_interval) {
+               dev->mib_read_interval = 0;
                cancel_delayed_work_sync(&dev->mib_read);
+       }
 
        dev->dev_ops->exit(dev);
        dsa_unregister_switch(dev->ds);
index 094737e5084a49bd5e8ad66381a42df76a952baa..9890672a206d0b1ca0cf575d04d770f9d48abe66 100644 (file)
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
 {
        struct mt7530_priv *priv = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
-               return 0;
-
        mutex_lock(&priv->reg_mutex);
 
        /* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
 {
        struct mt7530_priv *priv = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
-               return;
-
        mutex_lock(&priv->reg_mutex);
 
        /* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->dev = &mdiodev->dev;
-       priv->ds->num_ports = DSA_MAX_PORTS;
+       priv->ds->num_ports = MT7530_NUM_PORTS;
 
        /* Use medatek,mcm property to distinguish hardware type that would
         * casues a little bit differences on power-on sequence.
index 8ab0be793811e18fd8b8c2455872f81b65619cec..8dadcae93c9b53ef8ad4f3440ff9d12630c8a13e 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dsa/mv88e6xxx.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_bridge.h>
@@ -749,7 +750,11 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+       /* Internal PHYs propagate their configuration directly to the MAC.
+        * External PHYs depend on whether the PPU is enabled for this port.
+        */
+       if (((!mv88e6xxx_phy_is_internal(ds, port) &&
+             !mv88e6xxx_port_ppu_updates(chip, port)) ||
             mode == MLO_AN_FIXED) && ops->port_sync_link)
                err = ops->port_sync_link(chip, port, mode, false);
        mv88e6xxx_reg_unlock(chip);
@@ -772,7 +777,12 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
+       /* Internal PHYs propagate their configuration directly to the MAC.
+        * External PHYs depend on whether the PPU is enabled for this port.
+        */
+       if ((!mv88e6xxx_phy_is_internal(ds, port) &&
+            !mv88e6xxx_port_ppu_updates(chip, port)) ||
+           mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
                 * while we're bringing the port up, how is the exclusivity
@@ -1677,6 +1687,30 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
        return 0;
 }
 
+static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
+{
+       struct dsa_port *dp = dsa_to_port(chip->ds, port);
+       struct mv88e6xxx_port *p = &chip->ports[port];
+       u16 pvid = MV88E6XXX_VID_STANDALONE;
+       bool drop_untagged = false;
+       int err;
+
+       if (dp->bridge_dev) {
+               if (br_vlan_enabled(dp->bridge_dev)) {
+                       pvid = p->bridge_pvid.vid;
+                       drop_untagged = !p->bridge_pvid.valid;
+               } else {
+                       pvid = MV88E6XXX_VID_BRIDGED;
+               }
+       }
+
+       err = mv88e6xxx_port_set_pvid(chip, port, pvid);
+       if (err)
+               return err;
+
+       return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
+}
+
 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
                                         bool vlan_filtering,
                                         struct netlink_ext_ack *extack)
@@ -1690,7 +1724,16 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
                return -EOPNOTSUPP;
 
        mv88e6xxx_reg_lock(chip);
+
        err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               goto unlock;
+
+unlock:
        mv88e6xxx_reg_unlock(chip);
 
        return err;
@@ -1725,11 +1768,15 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
        u16 fid;
        int err;
 
-       /* Null VLAN ID corresponds to the port private database */
+       /* Ports have two private address databases: one for when the port is
+        * standalone and one for when the port is under a bridge and the
+        * 802.1Q mode is disabled. When the port is standalone, DSA wants its
+        * address database to remain 100% empty, so we never load an ATU entry
+        * into a standalone port's database. Therefore, translate the null
+        * VLAN ID into the port's database used for VLAN-unaware bridging.
+        */
        if (vid == 0) {
-               err = mv88e6xxx_port_get_fid(chip, port, &fid);
-               if (err)
-                       return err;
+               fid = MV88E6XXX_FID_BRIDGED;
        } else {
                err = mv88e6xxx_vtu_get(chip, vid, &vlan);
                if (err)
@@ -2123,6 +2170,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
        struct mv88e6xxx_chip *chip = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       struct mv88e6xxx_port *p = &chip->ports[port];
        bool warn;
        u8 member;
        int err;
@@ -2156,13 +2204,21 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
        }
 
        if (pvid) {
-               err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
-               if (err) {
-                       dev_err(ds->dev, "p%d: failed to set PVID %d\n",
-                               port, vlan->vid);
+               p->bridge_pvid.vid = vlan->vid;
+               p->bridge_pvid.valid = true;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
+               if (err)
+                       goto out;
+       } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
+               /* The old pvid was reinstalled as a non-pvid VLAN */
+               p->bridge_pvid.valid = false;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
+               if (err)
                        goto out;
-               }
        }
+
 out:
        mv88e6xxx_reg_unlock(chip);
 
@@ -2212,6 +2268,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
                                   const struct switchdev_obj_port_vlan *vlan)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_port *p = &chip->ports[port];
        int err = 0;
        u16 pvid;
 
@@ -2229,7 +2286,9 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
                goto unlock;
 
        if (vlan->vid == pvid) {
-               err = mv88e6xxx_port_set_pvid(chip, port, 0);
+               p->bridge_pvid.valid = false;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
                if (err)
                        goto unlock;
        }
@@ -2393,7 +2452,16 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
        int err;
 
        mv88e6xxx_reg_lock(chip);
+
        err = mv88e6xxx_bridge_map(chip, br);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               goto unlock;
+
+unlock:
        mv88e6xxx_reg_unlock(chip);
 
        return err;
@@ -2403,11 +2471,20 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
                                        struct net_device *br)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
 
        mv88e6xxx_reg_lock(chip);
+
        if (mv88e6xxx_bridge_map(chip, br) ||
            mv88e6xxx_port_vlan_map(chip, port))
                dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               dev_err(ds->dev,
+                       "port %d failed to restore standalone pvid: %pe\n",
+                       port, ERR_PTR(err));
+
        mv88e6xxx_reg_unlock(chip);
 }
 
@@ -2834,8 +2911,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       /* Port Control 2: don't force a good FCS, set the maximum frame size to
-        * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+       /* Port Control 2: don't force a good FCS, set the MTU size to
+        * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
         * untagged frames on this port, do a destination address lookup on all
         * received packets as usual, disable ARP mirroring and don't send a
         * copy of all transmitted/received frames on this port to the CPU.
@@ -2853,8 +2930,22 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
+       /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
+        * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
+        * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
+        * as the private PVID on ports under a VLAN-unaware bridge.
+        * Shared (DSA and CPU) ports must also be members of it, to translate
+        * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
+        * relying on their port default FID.
+        */
+       err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
+                                      MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+                                      false);
+       if (err)
+               return err;
+
        if (chip->info->ops->port_set_jumbo_size) {
-               err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+               err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
                if (err)
                        return err;
        }
@@ -2925,7 +3016,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
         * database, and allow bidirectional communication between the
         * CPU and DSA port(s), and the other ports.
         */
-       err = mv88e6xxx_port_set_fid(chip, port, 0);
+       err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
        if (err)
                return err;
 
@@ -2944,10 +3035,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
        struct mv88e6xxx_chip *chip = ds->priv;
 
        if (chip->info->ops->port_set_jumbo_size)
-               return 10240;
+               return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
        else if (chip->info->ops->set_max_frame_size)
-               return 1632;
-       return 1522;
+               return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+       return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 }
 
 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -2955,6 +3046,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
        struct mv88e6xxx_chip *chip = ds->priv;
        int ret = 0;
 
+       if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+               new_mtu += EDSA_HLEN;
+
        mv88e6xxx_reg_lock(chip);
        if (chip->info->ops->port_set_jumbo_size)
                ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
@@ -3112,6 +3206,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
                }
        }
 
+       err = mv88e6xxx_vtu_setup(chip);
+       if (err)
+               goto unlock;
+
        /* Setup Switch Port Registers */
        for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
                if (dsa_is_unused_port(ds, i))
@@ -3141,10 +3239,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
        if (err)
                goto unlock;
 
-       err = mv88e6xxx_vtu_setup(chip);
-       if (err)
-               goto unlock;
-
        err = mv88e6xxx_pvt_setup(chip);
        if (err)
                goto unlock;
@@ -3725,7 +3819,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
        .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3750,6 +3843,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .avb_ops = &mv88e6165_avb_ops,
        .ptp_ops = &mv88e6165_ptp_ops,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6165_ops = {
index 675b1f3e43b7bfc16694b226341710d06ab7744d..8271b8aa7b71eafad4fbcf16c7f550fa1a872e56 100644 (file)
 #include <linux/timecounter.h>
 #include <net/dsa.h>
 
+#define EDSA_HLEN              8
 #define MV88E6XXX_N_FID                4096
 
+#define MV88E6XXX_FID_STANDALONE       0
+#define MV88E6XXX_FID_BRIDGED          1
+
 /* PVT limits for 4-bit port and 5-bit switch */
 #define MV88E6XXX_MAX_PVT_SWITCHES     32
 #define MV88E6XXX_MAX_PVT_PORTS                16
@@ -245,9 +249,15 @@ struct mv88e6xxx_policy {
        u16 vid;
 };
 
+struct mv88e6xxx_vlan {
+       u16     vid;
+       bool    valid;
+};
+
 struct mv88e6xxx_port {
        struct mv88e6xxx_chip *chip;
        int port;
+       struct mv88e6xxx_vlan bridge_pvid;
        u64 serdes_stats[2];
        u64 atu_member_violation;
        u64 atu_miss_violation;
index 815b0f681d698fc75848a16861f32524083456b2..5848112036b08d90ea584f49d9146638ea695158 100644 (file)
@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
        u16 val;
        int err;
 
+       mtu += ETH_HLEN + ETH_FCS_LEN;
+
        err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
        if (err)
                return err;
index f77e2ee64a607a8cae7c6c1c927c4a6f1a01d9b9..d9817b20ea641f9b642435e693b5ee7e86398a7e 100644 (file)
@@ -1257,6 +1257,27 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
        return 0;
 }
 
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+                                bool drop_untagged)
+{
+       u16 old, new;
+       int err;
+
+       err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
+       if (err)
+               return err;
+
+       if (drop_untagged)
+               new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+       else
+               new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+
+       if (new == old)
+               return 0;
+
+       return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
+}
+
 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
 {
        u16 reg;
@@ -1277,6 +1298,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
        u16 reg;
        int err;
 
+       size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
        err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
        if (err)
                return err;
index b10e5aebacf6805916ddf6f28136e069e1947946..03382b66f80037dbf731178b3f6e1ee6c122db2a 100644 (file)
@@ -423,6 +423,8 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                              phy_interface_t mode);
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
 int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+                                bool drop_untagged);
 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
 int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
                                     int upstream_port);
index a3a9636430d6c0f1782c52e5561bac6ecdfe709c..341236dcbdb472b6d8a2fd19b9a8e3aebed625d8 100644 (file)
@@ -266,12 +266,12 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
  */
 static int felix_setup_mmio_filtering(struct felix *felix)
 {
-       unsigned long user_ports = 0, cpu_ports = 0;
+       unsigned long user_ports = dsa_user_ports(felix->ds);
        struct ocelot_vcap_filter *redirect_rule;
        struct ocelot_vcap_filter *tagging_rule;
        struct ocelot *ocelot = &felix->ocelot;
        struct dsa_switch *ds = felix->ds;
-       int port, ret;
+       int cpu = -1, port, ret;
 
        tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
        if (!tagging_rule)
@@ -284,12 +284,15 @@ static int felix_setup_mmio_filtering(struct felix *felix)
        }
 
        for (port = 0; port < ocelot->num_phys_ports; port++) {
-               if (dsa_is_user_port(ds, port))
-                       user_ports |= BIT(port);
-               if (dsa_is_cpu_port(ds, port))
-                       cpu_ports |= BIT(port);
+               if (dsa_is_cpu_port(ds, port)) {
+                       cpu = port;
+                       break;
+               }
        }
 
+       if (cpu < 0)
+               return -EINVAL;
+
        tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
        *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
        *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
@@ -325,7 +328,7 @@ static int felix_setup_mmio_filtering(struct felix *felix)
                 * the CPU port module
                 */
                redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
-               redirect_rule->action.port_mask = cpu_ports;
+               redirect_rule->action.port_mask = BIT(cpu);
        } else {
                /* Trap PTP packets only to the CPU port module (which is
                 * redirected to the NPI port)
@@ -1074,6 +1077,101 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        return 0;
 }
 
+static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
+                                          struct sk_buff *skb)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+       struct sk_buff *skb_match = NULL, *skb_tmp;
+       unsigned long flags;
+
+       if (!clone)
+               return;
+
+       spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
+
+       skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+               if (skb != clone)
+                       continue;
+               __skb_unlink(skb, &ocelot_port->tx_skbs);
+               skb_match = skb;
+               break;
+       }
+
+       spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
+
+       WARN_ONCE(!skb_match,
+                 "Could not find skb clone in TX timestamping list\n");
+}
+
+#define work_to_xmit_work(w) \
+               container_of((w), struct felix_deferred_xmit_work, work)
+
+static void felix_port_deferred_xmit(struct kthread_work *work)
+{
+       struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+       struct dsa_switch *ds = xmit_work->dp->ds;
+       struct sk_buff *skb = xmit_work->skb;
+       u32 rew_op = ocelot_ptp_rew_op(skb);
+       struct ocelot *ocelot = ds->priv;
+       int port = xmit_work->dp->index;
+       int retries = 10;
+
+       do {
+               if (ocelot_can_inject(ocelot, 0))
+                       break;
+
+               cpu_relax();
+       } while (--retries);
+
+       if (!retries) {
+               dev_err(ocelot->dev, "port %d failed to inject skb\n",
+                       port);
+               ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+               kfree_skb(skb);
+               return;
+       }
+
+       ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+       consume_skb(skb);
+       kfree(xmit_work);
+}
+
+static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct ocelot *ocelot = ds->priv;
+       struct felix *felix = ocelot_to_felix(ocelot);
+       struct felix_port *felix_port;
+
+       if (!dsa_port_is_user(dp))
+               return 0;
+
+       felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
+       if (!felix_port)
+               return -ENOMEM;
+
+       felix_port->xmit_worker = felix->xmit_worker;
+       felix_port->xmit_work_fn = felix_port_deferred_xmit;
+
+       dp->priv = felix_port;
+
+       return 0;
+}
+
+static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct felix_port *felix_port = dp->priv;
+
+       if (!felix_port)
+               return;
+
+       dp->priv = NULL;
+       kfree(felix_port);
+}
+
 /* Hardware initialization done here so that we can allocate structures with
  * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
  * us to allocate structures twice (leak memory) and map PCI memory twice
@@ -1102,6 +1200,12 @@ static int felix_setup(struct dsa_switch *ds)
                }
        }
 
+       felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
+       if (IS_ERR(felix->xmit_worker)) {
+               err = PTR_ERR(felix->xmit_worker);
+               goto out_deinit_timestamp;
+       }
+
        for (port = 0; port < ds->num_ports; port++) {
                if (dsa_is_unused_port(ds, port))
                        continue;
@@ -1112,6 +1216,14 @@ static int felix_setup(struct dsa_switch *ds)
                 * bits of vlan tag.
                 */
                felix_port_qos_map_init(ocelot, port);
+
+               err = felix_port_setup_tagger_data(ds, port);
+               if (err) {
+                       dev_err(ds->dev,
+                               "port %d failed to set up tagger data: %pe\n",
+                               port, ERR_PTR(err));
+                       goto out_deinit_ports;
+               }
        }
 
        err = ocelot_devlink_sb_register(ocelot);
@@ -1126,6 +1238,7 @@ static int felix_setup(struct dsa_switch *ds)
                 * there's no real point in checking for errors.
                 */
                felix_set_tag_protocol(ds, port, felix->tag_proto);
+               break;
        }
 
        ds->mtu_enforcement_ingress = true;
@@ -1138,9 +1251,13 @@ out_deinit_ports:
                if (dsa_is_unused_port(ds, port))
                        continue;
 
+               felix_port_teardown_tagger_data(ds, port);
                ocelot_deinit_port(ocelot, port);
        }
 
+       kthread_destroy_worker(felix->xmit_worker);
+
+out_deinit_timestamp:
        ocelot_deinit_timestamp(ocelot);
        ocelot_deinit(ocelot);
 
@@ -1162,19 +1279,23 @@ static void felix_teardown(struct dsa_switch *ds)
                        continue;
 
                felix_del_tag_protocol(ds, port, felix->tag_proto);
+               break;
        }
 
-       ocelot_devlink_sb_unregister(ocelot);
-       ocelot_deinit_timestamp(ocelot);
-       ocelot_deinit(ocelot);
-
        for (port = 0; port < ocelot->num_phys_ports; port++) {
                if (dsa_is_unused_port(ds, port))
                        continue;
 
+               felix_port_teardown_tagger_data(ds, port);
                ocelot_deinit_port(ocelot, port);
        }
 
+       kthread_destroy_worker(felix->xmit_worker);
+
+       ocelot_devlink_sb_unregister(ocelot);
+       ocelot_deinit_timestamp(ocelot);
+       ocelot_deinit(ocelot);
+
        if (felix->info->mdio_bus_free)
                felix->info->mdio_bus_free(ocelot);
 }
@@ -1291,8 +1412,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
        if (!ocelot->ptp)
                return;
 
-       if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
+       if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+               dev_err_ratelimited(ds->dev,
+                                   "port %d delivering skb without TX timestamp\n",
+                                   port);
                return;
+       }
 
        if (clone)
                OCELOT_SKB_CB(skb)->clone = clone;
index 54024b6f9498f4c3bf2551252ce63f5779bbf25e..be3e42e135c008213a51ec6d51f6277f5238fb39 100644 (file)
@@ -62,6 +62,7 @@ struct felix {
        resource_size_t                 switch_base;
        resource_size_t                 imdio_base;
        enum dsa_tag_protocol           tag_proto;
+       struct kthread_worker           *xmit_worker;
 };
 
 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
index 7c0db80eff008fb9c2aa5ee1a2f5ff83fda3d966..924c3f129992f27c134ed95873404ec9ae8695ed 100644 (file)
@@ -3117,7 +3117,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
        sja1105_static_config_free(&priv->static_config);
 }
 
-const struct dsa_switch_ops sja1105_switch_ops = {
+static const struct dsa_switch_ops sja1105_switch_ops = {
        .get_tag_protocol       = sja1105_get_tag_protocol,
        .setup                  = sja1105_setup,
        .teardown               = sja1105_teardown,
@@ -3166,7 +3166,6 @@ const struct dsa_switch_ops sja1105_switch_ops = {
        .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
        .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
 };
-EXPORT_SYMBOL_GPL(sja1105_switch_ops);
 
 static const struct of_device_id sja1105_dt_ids[];
 
index 691f6dd7e669738e3316b0909ba578dfed78bcd4..54396992a9199c39b1abcabced620e5c6b4e9c58 100644 (file)
@@ -64,6 +64,7 @@ enum sja1105_ptp_clk_mode {
 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                                      bool on)
 {
+       struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
        struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
        struct sja1105_general_params_entry *general_params;
        struct sja1105_table *table;
@@ -79,7 +80,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                priv->tagger_data.stampable_skb = NULL;
        }
        ptp_cancel_worker_sync(ptp_data->clock);
-       skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+       skb_queue_purge(&tagger_data->skb_txtstamp_queue);
        skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
 
        return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -452,40 +453,6 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
        return priv->info->rxtstamp(ds, port, skb);
 }
 
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
-                                enum sja1110_meta_tstamp dir, u64 tstamp)
-{
-       struct sja1105_private *priv = ds->priv;
-       struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
-       struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
-       struct skb_shared_hwtstamps shwt = {0};
-
-       /* We don't care about RX timestamps on the CPU port */
-       if (dir == SJA1110_META_TSTAMP_RX)
-               return;
-
-       spin_lock(&ptp_data->skb_txtstamp_queue.lock);
-
-       skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
-               if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
-                       continue;
-
-               __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
-               skb_match = skb;
-
-               break;
-       }
-
-       spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
-
-       if (WARN_ON(!skb_match))
-               return;
-
-       shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
-       skb_complete_tx_timestamp(skb_match, &shwt);
-}
-EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
-
 /* In addition to cloning the skb which is done by the common
  * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
  * packet to the TX timestamping queue.
@@ -494,7 +461,6 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 {
        struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
        struct sja1105_private *priv = ds->priv;
-       struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
        struct sja1105_port *sp = &priv->ports[port];
        u8 ts_id;
 
@@ -510,7 +476,7 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 
        spin_unlock(&sp->data->meta_lock);
 
-       skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+       skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
 }
 
 /* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -953,7 +919,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
        /* Only used on SJA1105 */
        skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
        /* Only used on SJA1110 */
-       skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
+       skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
        spin_lock_init(&tagger_data->meta_lock);
 
        ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -971,6 +937,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
 void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 {
        struct sja1105_private *priv = ds->priv;
+       struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
        struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 
        if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -978,7 +945,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 
        del_timer_sync(&ptp_data->extts_timer);
        ptp_cancel_worker_sync(ptp_data->clock);
-       skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+       skb_queue_purge(&tagger_data->skb_txtstamp_queue);
        skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
        ptp_clock_unregister(ptp_data->clock);
        ptp_data->clock = NULL;
index 3c874bb4c17b7385b727c436c3482b4ea8529231..3ae6b9fdd492b5eaadc6e96b233ce8536fbddfb5 100644 (file)
@@ -8,21 +8,6 @@
 
 #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
 
-/* Timestamps are in units of 8 ns clock ticks (equivalent to
- * a fixed 125 MHz clock).
- */
-#define SJA1105_TICK_NS                        8
-
-static inline s64 ns_to_sja1105_ticks(s64 ns)
-{
-       return ns / SJA1105_TICK_NS;
-}
-
-static inline s64 sja1105_ticks_to_ns(s64 ticks)
-{
-       return ticks * SJA1105_TICK_NS;
-}
-
 /* Calculate the first base_time in the future that satisfies this
  * relationship:
  *
@@ -77,10 +62,6 @@ struct sja1105_ptp_data {
        struct timer_list extts_timer;
        /* Used only on SJA1105 to reconstruct partial timestamps */
        struct sk_buff_head skb_rxtstamp_queue;
-       /* Used on SJA1110 where meta frames are generated only for
-        * 2-step TX timestamps
-        */
-       struct sk_buff_head skb_txtstamp_queue;
        struct ptp_clock_info caps;
        struct ptp_clock *clock;
        struct sja1105_ptp_cmd cmd;
index d796684ec9ca00523b774881fbbcbb3d9f06263b..412ae3e43ffb71782fc810c9541ccec749aa053e 100644 (file)
@@ -100,6 +100,7 @@ config JME
 config KORINA
        tristate "Korina (IDT RC32434) Ethernet support"
        depends on MIKROTIK_RB532 || COMPILE_TEST
+       select CRC32
        select MII
        help
          If you have a Mikrotik RouterBoard 500 or IDT RC32434
index 37a41773dd4350d96fded737601db7afef3ee521..92a79c4ffa2c7bc3313cf90cc2c8b8c513be47a2 100644 (file)
@@ -21,6 +21,7 @@ config ARC_EMAC_CORE
        depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
        select MII
        select PHYLIB
+       select CRC32
 
 config ARC_EMAC
        tristate "ARC EMAC support"
index 4ab5bf64d353e35761d468c90394cb6a4e05cfd2..df8ff839cc62142cf7b2b94a11b76a5860c6306c 100644 (file)
@@ -192,6 +192,9 @@ static int bgmac_probe(struct platform_device *pdev)
        bgmac->dma_dev = &pdev->dev;
 
        ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+
        if (ret)
                dev_warn(&pdev->dev,
                         "MAC address not present in device tree\n");
index 691e1475d55e4eb24fa8a774bebf89af75d1487c..0fbecd093fa1f049ff0445feb61921a8c06291e7 100644 (file)
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
                dev_err(&nic->pdev->dev,
                        "Request for #%d msix vectors failed, returned %d\n",
                           nic->num_vec, ret);
-               return 1;
+               return ret;
        }
 
        /* Register mailbox interrupt handler */
index d1667b7595223fa80a045422caafdba9e4d342e7..a27227aeae88007c3b2cce7ae7ad50214ce6a564 100644 (file)
@@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
        if (ret < 0) {
                netdev_err(nic->netdev,
                           "Req for #%d msix vectors failed\n", nic->num_vec);
-               return 1;
+               return ret;
        }
 
        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
        if (!nicvf_check_pf_ready(nic)) {
                nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
                nicvf_unregister_interrupts(nic);
-               return 1;
+               return -EIO;
        }
 
        return 0;
index 9690e36e9e8554fdf17220af3f6913f9a67f096f..910b9f722504a276b8417e2e01c1aef86bb852bf 100644 (file)
@@ -157,7 +157,7 @@ static const struct {
        { ENETC_PM0_TFRM,   "MAC tx frames" },
        { ENETC_PM0_TFCS,   "MAC tx fcs errors" },
        { ENETC_PM0_TVLAN,  "MAC tx VLAN frames" },
-       { ENETC_PM0_TERR,   "MAC tx frames" },
+       { ENETC_PM0_TERR,   "MAC tx frame errors" },
        { ENETC_PM0_TUCA,   "MAC tx unicast frames" },
        { ENETC_PM0_TMCA,   "MAC tx multicast frames" },
        { ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
index 60d94e0a07d6e55a57b169bed3e920947fe1e853..d522bd5c90b498f11ebdd0c7270486c1972c2af3 100644 (file)
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
 
 static void enetc_configure_port_mac(struct enetc_hw *hw)
 {
+       int tc;
+
        enetc_port_wr(hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
 
-       enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+       for (tc = 0; tc < 8; tc++)
+               enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
 
        enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
                      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -541,8 +544,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
 
        if (phy_interface_mode_is_rgmii(phy_mode)) {
                val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
-               val &= ~ENETC_PM0_IFM_EN_AUTO;
-               val &= ENETC_PM0_IFM_IFMODE_MASK;
+               val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
                val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
                enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
        }
index 1d3188e8e3b3c01caa84c3edbf332cd0c1fa5dd2..92dc18a4bcc41c233e97e693a85359d86f4b95cb 100644 (file)
@@ -780,7 +780,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
                                    gve_num_tx_qpls(priv));
 
        /* we are out of rx qpls */
-       if (id == priv->qpl_cfg.qpl_map_size)
+       if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
                return NULL;
 
        set_bit(id, priv->qpl_cfg.qpl_id_map);
index 099a2bc5ae6704553516561106a0bb6822fdb8e4..bf8a4a7c43f78001f74c80a2312ff5aef21aff90 100644 (file)
@@ -41,6 +41,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 {
        struct gve_priv *priv = netdev_priv(dev);
        unsigned int start;
+       u64 packets, bytes;
        int ring;
 
        if (priv->rx) {
@@ -48,10 +49,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
-                               s->rx_packets += priv->rx[ring].rpackets;
-                               s->rx_bytes += priv->rx[ring].rbytes;
+                               packets = priv->rx[ring].rpackets;
+                               bytes = priv->rx[ring].rbytes;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
+                       s->rx_packets += packets;
+                       s->rx_bytes += bytes;
                }
        }
        if (priv->tx) {
@@ -59,10 +62,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
-                               s->tx_packets += priv->tx[ring].pkt_done;
-                               s->tx_bytes += priv->tx[ring].bytes_done;
+                               packets = priv->tx[ring].pkt_done;
+                               bytes = priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
+                       s->tx_packets += packets;
+                       s->tx_bytes += bytes;
                }
        }
 }
@@ -82,6 +87,9 @@ static int gve_alloc_counter_array(struct gve_priv *priv)
 
 static void gve_free_counter_array(struct gve_priv *priv)
 {
+       if (!priv->counter_array)
+               return;
+
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_event_counters *
                          sizeof(*priv->counter_array),
@@ -142,6 +150,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
 
 static void gve_free_stats_report(struct gve_priv *priv)
 {
+       if (!priv->stats_report)
+               return;
+
        del_timer_sync(&priv->stats_report_timer);
        dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
                          priv->stats_report, priv->stats_report_bus);
@@ -370,18 +381,19 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
 {
        int i;
 
-       if (priv->msix_vectors) {
-               /* Free the irqs */
-               for (i = 0; i < priv->num_ntfy_blks; i++) {
-                       struct gve_notify_block *block = &priv->ntfy_blocks[i];
-                       int msix_idx = i;
+       if (!priv->msix_vectors)
+               return;
 
-                       irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
-                                             NULL);
-                       free_irq(priv->msix_vectors[msix_idx].vector, block);
-               }
-               free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+       /* Free the irqs */
+       for (i = 0; i < priv->num_ntfy_blks; i++) {
+               struct gve_notify_block *block = &priv->ntfy_blocks[i];
+               int msix_idx = i;
+
+               irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+                                     NULL);
+               free_irq(priv->msix_vectors[msix_idx].vector, block);
        }
+       free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
                          priv->ntfy_blocks, priv->ntfy_block_bus);
@@ -1185,9 +1197,10 @@ static void gve_handle_reset(struct gve_priv *priv)
 
 void gve_handle_report_stats(struct gve_priv *priv)
 {
-       int idx, stats_idx = 0, tx_bytes;
-       unsigned int start = 0;
        struct stats *stats = priv->stats_report->stats;
+       int idx, stats_idx = 0;
+       unsigned int start = 0;
+       u64 tx_bytes;
 
        if (!gve_get_report_stats(priv))
                return;
index bb8261368250293a873565cd69da3838cc358c8c..94941d4e47449244a3c4f5fe2c8248cab24300ec 100644 (file)
@@ -104,8 +104,14 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
        if (!rx->data.page_info)
                return -ENOMEM;
 
-       if (!rx->data.raw_addressing)
+       if (!rx->data.raw_addressing) {
                rx->data.qpl = gve_assign_rx_qpl(priv);
+               if (!rx->data.qpl) {
+                       kvfree(rx->data.page_info);
+                       rx->data.page_info = NULL;
+                       return -ENOMEM;
+               }
+       }
        for (i = 0; i < slots; i++) {
                if (!rx->data.raw_addressing) {
                        struct page *page = rx->data.qpl->pages[i];
index eef1b2764d34ae60499cb3ce110c4390aba138eb..67b0bf310daaafd1f4a6f4b3bf6c64d06d99e10f 100644 (file)
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+       const struct pci_device_id *pci_id;
+       struct hnae3_ae_dev *ae_dev;
+
+       if (!ae_algo)
+               return;
+
+       list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+               if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+                       continue;
+
+               pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+               if (!pci_id)
+                       continue;
+               if (IS_ENABLED(CONFIG_PCI_IOV))
+                       pci_disable_sriov(ae_dev->pdev);
+       }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
 /* we are keeping things simple and using single lock for all the
  * list. This is a non-critical code so other updations, if happen
  * in parallel, can wait.
index 546a605303848aa1db81452707b4e89ab7b52bac..d701451596c825e78b55bb3164b76790df3a578a 100644 (file)
@@ -752,7 +752,6 @@ struct hnae3_tc_info {
        u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
        u16 tqp_count[HNAE3_MAX_TC];
        u16 tqp_offset[HNAE3_MAX_TC];
-       unsigned long tc_en; /* bitmap of TC enabled */
        u8 num_tc; /* Total number of enabled TCs */
        bool mqprio_active;
 };
@@ -854,6 +853,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
 
index adc54a72666121ce5b38c2c1ab4e2ba94eaf506d..4b886a13e07970d4015aee6fd26a7efcd493f96f 100644 (file)
@@ -623,13 +623,9 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
                        return ret;
                }
 
-               for (i = 0; i < HNAE3_MAX_TC; i++) {
-                       if (!test_bit(i, &tc_info->tc_en))
-                               continue;
-
+               for (i = 0; i < tc_info->num_tc; i++)
                        netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
                                            tc_info->tqp_offset[i]);
-               }
        }
 
        ret = netif_set_real_num_tx_queues(netdev, queue_size);
@@ -779,6 +775,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
        if (hns3_nic_resetting(netdev))
                return -EBUSY;
 
+       if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+               netdev_warn(netdev, "net open repeatedly!\n");
+               return 0;
+       }
+
        netif_carrier_off(netdev);
 
        ret = hns3_nic_set_real_num_queue(netdev);
@@ -1846,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
 
 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
                              struct sk_buff *skb,
-                             u8 max_non_tso_bd_num,
                              unsigned int bd_num)
 {
        /* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1863,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
         * will not help.
         */
        if (skb->len > HNS3_MAX_TSO_SIZE ||
-           (!skb_is_gso(skb) && skb->len >
-            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+           (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
                u64_stats_update_begin(&ring->syncp);
                ring->stats.hw_limitation++;
                u64_stats_update_end(&ring->syncp);
@@ -1899,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
                        goto out;
                }
 
-               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-                                      bd_num))
+               if (hns3_skb_linearize(ring, skb, bd_num))
                        return -ENOMEM;
 
                bd_num = hns3_tx_bd_count(skb->len);
@@ -3257,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc[i].addr = 0;
+       ring->desc_cb[i].refill = 0;
 }
 
 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3335,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
+       ring->desc_cb[i].refill = 1;
 
        return 0;
 }
@@ -3364,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc_cb[i] = *res_cb;
+       ring->desc_cb[i].refill = 1;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
@@ -3372,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
        ring->desc_cb[i].reuse_flag = 0;
+       ring->desc_cb[i].refill = 1;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
@@ -3478,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
 
+       if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+               return ring->desc_num;
+
        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
                                      int cleand_count)
 {
        struct hns3_desc_cb *desc_cb;
@@ -3506,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
                                hns3_rl_err(ring_to_netdev(ring),
                                            "alloc rx buffer failed: %d\n",
                                            ret);
-                               break;
+
+                               writel(i, ring->tqp->io_base +
+                                      HNS3_RING_RX_RING_HEAD_REG);
+                               return true;
                        }
                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 
@@ -3519,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
        }
 
        writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+       return false;
 }
 
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3823,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
        ring->desc[ring->next_to_clean].rx.bd_base_info &=
                cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+       ring->desc_cb[ring->next_to_clean].refill = 0;
        ring->next_to_clean += 1;
 
        if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4169,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
        int unused_count = hns3_desc_unused(ring);
+       bool failure = false;
        int recv_pkts = 0;
        int err;
 
@@ -4177,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
        while (recv_pkts < budget) {
                /* Reuse or realloc buffers */
                if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-                       hns3_nic_alloc_rx_buffers(ring, unused_count);
-                       unused_count = hns3_desc_unused(ring) -
-                                       ring->pending_buf;
+                       failure = failure ||
+                               hns3_nic_alloc_rx_buffers(ring, unused_count);
+                       unused_count = 0;
                }
 
                /* Poll one pkt */
@@ -4198,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
        }
 
 out:
-       /* Make all data has been write before submit */
-       if (unused_count > 0)
-               hns3_nic_alloc_rx_buffers(ring, unused_count);
-
-       return recv_pkts;
+       return failure ? budget : recv_pkts;
 }
 
 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4865,12 +4873,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
        struct hnae3_tc_info *tc_info = &kinfo->tc_info;
        int i;
 
-       for (i = 0; i < HNAE3_MAX_TC; i++) {
+       for (i = 0; i < tc_info->num_tc; i++) {
                int j;
 
-               if (!test_bit(i, &tc_info->tc_en))
-                       continue;
-
                for (j = 0; j < tc_info->tqp_count[i]; j++) {
                        struct hnae3_queue *q;
 
index 6162d9f88e373bb7a796e46a7de3ed4d7ec7dc3d..f09a61d9c6264e75e2a33ba5f49602d59caefd09 100644 (file)
@@ -186,11 +186,9 @@ enum hns3_nic_state {
 
 #define HNS3_MAX_BD_SIZE                       65535
 #define HNS3_MAX_TSO_BD_NUM                    63U
-#define HNS3_MAX_TSO_SIZE \
-       (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE                      1048576U
+#define HNS3_MAX_NON_TSO_SIZE                  9728U
 
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-       (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
 
 #define HNS3_VECTOR_GL0_OFFSET                 0x100
 #define HNS3_VECTOR_GL1_OFFSET                 0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
        u32 length;     /* length of the buffer */
 
        u16 reuse_flag;
+       u16 refill;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
index 7ea511d59e91ac62767928a14afa428fc7a199c6..5ebd96f6833d6ebb8dc176af3374379d0556d35f 100644 (file)
@@ -334,7 +334,8 @@ static void hns3_selftest_prepare(struct net_device *ndev,
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
        /* Disable the vlan filter for selftest does not support it */
-       if (h->ae_algo->ops->enable_vlan_filter)
+       if (h->ae_algo->ops->enable_vlan_filter &&
+           ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                h->ae_algo->ops->enable_vlan_filter(h, false);
 #endif
 
@@ -359,7 +360,8 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
                h->ae_algo->ops->halt_autoneg(h, false);
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-       if (h->ae_algo->ops->enable_vlan_filter)
+       if (h->ae_algo->ops->enable_vlan_filter &&
+           ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                h->ae_algo->ops->enable_vlan_filter(h, true);
 #endif
 
index ac9b69513332bf7c94bfa71d180cc13a52fc8013..9c2eeaa822944fd31c0f7192657db99b8c909a3f 100644 (file)
@@ -467,7 +467,7 @@ err_csq:
        return ret;
 }
 
-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
 {
        struct hclge_firmware_compat_cmd *req;
        struct hclge_desc desc;
@@ -475,13 +475,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
 
-       req = (struct hclge_firmware_compat_cmd *)desc.data;
+       if (en) {
+               req = (struct hclge_firmware_compat_cmd *)desc.data;
 
-       hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
-       hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
-       if (hnae3_dev_phy_imp_supported(hdev))
-               hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
-       req->compat = cpu_to_le32(compat);
+               hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+               hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+               if (hnae3_dev_phy_imp_supported(hdev))
+                       hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+
+               req->compat = cpu_to_le32(compat);
+       }
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -538,7 +541,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
        /* ask the firmware to enable some features, driver can work without
         * it.
         */
-       ret = hclge_firmware_compat_config(hdev);
+       ret = hclge_firmware_compat_config(hdev, true);
        if (ret)
                dev_warn(&hdev->pdev->dev,
                         "Firmware compatible features not enabled(%d).\n",
@@ -568,6 +571,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+       hclge_firmware_compat_config(hdev, false);
+
        set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        /* wait to ensure that the firmware completes the possible left
         * over commands.
index 4a619e5d3f35e3ca159685799b7e19eccf7f8b44..91cb578f56b80a2bd229441d550c5343dbb347ad 100644 (file)
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
                                *changed = true;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
+                       /* The hardware will switch to sp mode if bandwidth is
+                        * 0, so limit ets bandwidth must be greater than 0.
+                        */
+                       if (!ets->tc_tx_bw[i]) {
+                               dev_err(&hdev->pdev->dev,
+                                       "tc%u ets bw cannot be 0\n", i);
+                               return -EINVAL;
+                       }
+
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_DWRR)
                                *changed = true;
@@ -247,6 +256,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
        }
 
        hclge_tm_schd_info_update(hdev, num_tc);
+       if (num_tc > 1)
+               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+       else
+               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 
        ret = hclge_ieee_ets_to_tm_info(hdev, ets);
        if (ret)
@@ -306,8 +319,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        u8 i, j, pfc_map, *prio_tc;
        int ret;
 
-       if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
-           hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+       if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
                return -EINVAL;
 
        if (pfc->pfc_en == hdev->tm_info.pfc_en)
@@ -441,8 +453,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
 static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
-       int i;
-
        memset(tc_info, 0, sizeof(*tc_info));
        tc_info->num_tc = mqprio_qopt->qopt.num_tc;
        memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
@@ -451,9 +461,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
               sizeof_field(struct hnae3_tc_info, tqp_count));
        memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
               sizeof_field(struct hnae3_tc_info, tqp_offset));
-
-       for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
-               set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
 }
 
 static int hclge_config_tc(struct hclge_dev *hdev,
@@ -519,12 +526,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
        return hclge_notify_init_up(hdev);
 
 err_out:
-       /* roll-back */
-       memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
-       if (hclge_config_tc(hdev, &kinfo->tc_info))
-               dev_err(&hdev->pdev->dev,
-                       "failed to roll back tc configuration\n");
-
+       if (!tc) {
+               dev_warn(&hdev->pdev->dev,
+                        "failed to destroy mqprio, will active after reset, ret = %d\n",
+                        ret);
+       } else {
+               /* roll-back */
+               memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+               if (hclge_config_tc(hdev, &kinfo->tc_info))
+                       dev_err(&hdev->pdev->dev,
+                               "failed to roll back tc configuration\n");
+       }
        hclge_notify_init_up(hdev);
 
        return ret;
index 87d96f82c3182c74bda6959dc958b42dd177bf35..32f62cd2dd99f4ebcfc0cae8c2a6cf244ccb73e5 100644 (file)
@@ -719,9 +719,9 @@ static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
        sprintf(result[(*index)++], "%6u", para->rate);
 }
 
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+                                 char *buf, int len)
 {
-       char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
        struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
        char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
        u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
@@ -729,8 +729,10 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
        int pos = 0;
        int ret;
 
-       for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
-               result[i] = &data_str[i][0];
+       for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+               result[i] = data_str;
+               data_str += HCLGE_DBG_DATA_STR_LEN;
+       }
 
        hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
                               NULL, ARRAY_SIZE(tm_pg_items));
@@ -781,6 +783,24 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
        return 0;
 }
 
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+       char *data_str;
+       int ret;
+
+       data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+                          HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+
+       if (!data_str)
+               return -ENOMEM;
+
+       ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
+
+       kfree(data_str);
+
+       return ret;
+}
+
 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev,  char *buf, int len)
 {
        struct hclge_tm_shaper_para shaper_para;
index bb9b026ae88e52661d1e02e11aa8389c9b400d8f..93aa7f2bdc13bc02c5e89a851841cafa188f8445 100644 (file)
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
 
        /* configure TM QCN hw errors */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
-       if (en)
+       desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+       if (en) {
+               desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
                desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+       }
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
index 07987fb8332ef27b1b6f99b284dc297fe2d20ec6..d811eeefe2c0548d1d1ecb054546601e4d76b0d3 100644 (file)
@@ -50,6 +50,8 @@
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN  0x003F
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK     0x003F
 #define HCLGE_TM_SCH_ECC_ERR_INT_EN    0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE      0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN       0xFFFF00
 #define HCLGE_TM_QCN_MEM_ERR_INT_EN    0xFFFFFF
 #define HCLGE_NCSI_ERR_INT_EN  0x3
 #define HCLGE_NCSI_ERR_INT_TYPE        0x9
index 47fea8985861d144be3a0dd1ab559a4060448bfb..dcd40cc73082af46d663a466a24276b5986b6454 100644 (file)
@@ -8708,15 +8708,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
        }
 
        /* check if we just hit the duplicate */
-       if (!ret) {
-               dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
-                        vport->vport_id, addr);
-               return 0;
-       }
-
-       dev_err(&hdev->pdev->dev,
-               "PF failed to add unicast entry(%pM) in the MAC table\n",
-               addr);
+       if (!ret)
+               return -EEXIST;
 
        return ret;
 }
@@ -8868,7 +8861,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
                } else {
                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
                                &vport->state);
-                       break;
+
+                       /* If one unicast mac address is existing in hardware,
+                        * we need to try whether other unicast mac addresses
+                        * are new addresses that can be added.
+                        */
+                       if (ret != -EEXIST)
+                               break;
                }
        }
 }
@@ -12797,8 +12796,12 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
                        continue;
 
                if (vport->vf_info.trusted) {
-                       uc_en = vport->vf_info.request_uc_en > 0;
-                       mc_en = vport->vf_info.request_mc_en > 0;
+                       uc_en = vport->vf_info.request_uc_en > 0 ||
+                               vport->overflow_promisc_flags &
+                               HNAE3_OVERFLOW_UPE;
+                       mc_en = vport->vf_info.request_mc_en > 0 ||
+                               vport->overflow_promisc_flags &
+                               HNAE3_OVERFLOW_MPE;
                }
                bc_en = vport->vf_info.request_bc_en > 0;
 
@@ -13062,6 +13065,7 @@ static int hclge_init(void)
 
 static void hclge_exit(void)
 {
+       hnae3_unregister_ae_algo_prepare(&ae_algo);
        hnae3_unregister_ae_algo(&ae_algo);
        destroy_workqueue(hclge_wq);
 }
index 44618cc4cca10cd88ca1b24b55e9fe3139641c12..95074e91a8466a3064eab898007511990acf9edf 100644 (file)
@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 
        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
-                       set_bit(i, &kinfo->tc_info.tc_en);
                        kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
                        kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
                } else {
                        /* Set to default queue if TC is disable */
-                       clear_bit(i, &kinfo->tc_info.tc_en);
                        kinfo->tc_info.tqp_offset[i] = 0;
                        kinfo->tc_info.tqp_count[i] = 1;
                }
@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;
-
-       /* DCB is enabled if we have more than 1 TC or pfc_en is
-        * non-zero.
-        */
-       if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-       else
-               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }
 
 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -762,15 +752,17 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+               for (; k < HNAE3_MAX_TC; k++)
+                       hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
        }
 }
 
 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 {
-       if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+       if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
-                                "DCB is disable, but last mode is FC_PFC\n");
+                                "Only 1 tc used, but last mode is FC_PFC\n");
 
                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -796,7 +788,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
        }
 }
 
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
 {
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
                hclge_update_fc_mode(hdev);
@@ -812,7 +804,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
 
        hclge_tm_vport_info_update(hdev);
 
-       hclge_pfc_info_init(hdev);
+       hclge_tm_pfc_info_update(hdev);
 }
 
 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1558,19 +1550,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
        hclge_tm_schd_info_init(hdev);
 }
 
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
-       /* DCB is enabled if we have more than 1 TC or pfc_en is
-        * non-zero.
-        */
-       if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-       else
-               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
-       hclge_pfc_info_init(hdev);
-}
-
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
        int ret;
@@ -1616,7 +1595,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
        if (ret)
                return ret;
 
-       if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+       if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
                return 0;
 
        return hclge_tm_bp_setup(hdev);
index 5fdac8685f9552ec9dae748c3ac60ad5a569abd7..bef6b98e2f50c892e4d376bd83c28043df149fa7 100644 (file)
@@ -2273,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
                hdev->reset_attempts = 0;
 
                hdev->last_reset_time = jiffies;
-               while ((hdev->reset_type =
-                       hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-                      != HNAE3_NONE_RESET)
+               hdev->reset_type =
+                       hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+               if (hdev->reset_type != HNAE3_NONE_RESET)
                        hclgevf_reset(hdev);
        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
                                      &hdev->reset_state)) {
index 3e54017a2a5ba8f8003b12f3723ecf65c8e04a3d..07fdab58001d9175a124be5ab70038c40f35add4 100644 (file)
@@ -354,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus)
 
        if (dev_of_node(bus->parent)) {
                if (!mdio_dev->subctrl_vbase) {
-                       dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+                       dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");
                        return -ENODEV;
                }
 
index a4579b3401204fe0698baf65a79842922112b4ee..6aa6ff89a76511ff840e545e3bd0dc9ec5c4faff 100644 (file)
@@ -4708,14 +4708,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                return 0;
        }
 
-       if (adapter->failover_pending) {
-               adapter->init_done_rc = -EAGAIN;
-               netdev_dbg(netdev, "Failover pending, ignoring login response\n");
-               complete(&adapter->init_done);
-               /* login response buffer will be released on reset */
-               return 0;
-       }
-
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
index 373eb027b925207fdc70b47e413c4055559a2e2d..09ae1939e6db4c7cc32e89708aa871763a543d91 100644 (file)
@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
                sizeof(info->bus_info));
 }
 
-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
 static int e100_get_regs_len(struct net_device *netdev)
 {
        struct nic *nic = netdev_priv(netdev);
-       return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+       /* We know the number of registers, and the size of the dump buffer.
+        * Calculate the total size in bytes.
+        */
+       return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
 }
 
 static void e100_get_regs(struct net_device *netdev,
@@ -2455,14 +2459,18 @@ static void e100_get_regs(struct net_device *netdev,
        buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
                ioread8(&nic->csr->scb.cmd_lo) << 16 |
                ioread16(&nic->csr->scb.status);
-       for (i = E100_PHY_REGS; i >= 0; i--)
-               buff[1 + E100_PHY_REGS - i] =
-                       mdio_read(netdev, nic->mii.phy_id, i);
+       for (i = 0; i < E100_PHY_REGS; i++)
+               /* Note that we read the registers in reverse order. This
+                * ordering is the ABI apparently used by ethtool and other
+                * applications.
+                */
+               buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+                                       E100_PHY_REGS - 1 - i);
        memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
        e100_exec_cb(nic, NULL, e100_dump);
        msleep(10);
-       memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
-               sizeof(nic->mem->dump_buf));
+       memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
+              sizeof(nic->mem->dump_buf));
 }
 
 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
index 5b2143f4b1f85fada39e3509508d7ed31be46abb..3178efd980066c92397807328148c7e37d39c134 100644 (file)
@@ -113,7 +113,8 @@ enum e1000_boards {
        board_pch2lan,
        board_pch_lpt,
        board_pch_spt,
-       board_pch_cnp
+       board_pch_cnp,
+       board_pch_tgp
 };
 
 struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
index 60c582a1682104dce49bc554c6ae7178837644d0..5e4fc9b4e2adb00c467dc6c9f6000b5daa5f54cc 100644 (file)
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
-       u32 ctrl_ext, txdctl, snoop;
+       u32 ctrl_ext, txdctl, snoop, fflt_dbg;
        s32 ret_val;
        u16 i;
 
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
                snoop = (u32)~(PCIE_NO_SNOOP_ALL);
        e1000e_set_pcie_no_snoop(hw, snoop);
 
+       /* Enable workaround for packet loss issue on TGP PCH
+        * Do not gate DMA clock from the modPHY block
+        */
+       if (mac->type >= e1000_pch_tgp) {
+               fflt_dbg = er32(FFLT_DBG);
+               fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+               ew32(FFLT_DBG, fflt_dbg);
+       }
+
        ctrl_ext = er32(CTRL_EXT);
        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
        ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
        .phy_ops                = &ich8_phy_ops,
        .nvm_ops                = &spt_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_tgp_info = {
+       .mac                    = e1000_pch_tgp,
+       .flags                  = FLAG_IS_ICH
+                                 | FLAG_HAS_WOL
+                                 | FLAG_HAS_HW_TIMESTAMP
+                                 | FLAG_HAS_CTRLEXT_ON_LOAD
+                                 | FLAG_HAS_AMT
+                                 | FLAG_HAS_FLASH
+                                 | FLAG_HAS_JUMBO_FRAMES
+                                 | FLAG_APME_IN_WUC,
+       .flags2                 = FLAG2_HAS_PHY_STATS
+                                 | FLAG2_HAS_EEE,
+       .pba                    = 26,
+       .max_hw_frame_size      = 9022,
+       .get_variants           = e1000_get_variants_ich8lan,
+       .mac_ops                = &ich8_mac_ops,
+       .phy_ops                = &ich8_phy_ops,
+       .nvm_ops                = &spt_nvm_ops,
+};
index d6a092e5ee7492d60961cb3115e786b9b3147e8e..2504b11c3169fa6886403b163bb8705729ffda56 100644 (file)
 /* Proprietary Latency Tolerance Reporting PCI Capability */
 #define E1000_PCI_LTR_CAP_LPT          0xA8
 
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK  0x1000
+
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
                                                  bool state);
index 900b3ab998bd8c433a6055d63f4503617d33aa96..ebcb2a30add095e4e8d7193ef4ff374a0919aded 100644 (file)
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
        [board_pch_lpt]         = &e1000_pch_lpt_info,
        [board_pch_spt]         = &e1000_pch_spt_info,
        [board_pch_cnp]         = &e1000_pch_cnp_info,
+       [board_pch_tgp]         = &e1000_pch_tgp_info,
 };
 
 struct e1000_reg_info {
@@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
index 2f20980dd9a58aa8e4251c7b1cf6b69853e83175..e04b540cedc85c3c4a435600274b33e401809c67 100644 (file)
@@ -4871,7 +4871,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
        int i;
 
-       i40e_free_misc_vector(pf);
+       if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
+               i40e_free_misc_vector(pf);
 
        i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
                      I40E_IWARP_IRQ_PILE_ID);
@@ -10113,7 +10114,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
                if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
                        /* retry with a larger buffer */
                        buf_len = data_size;
-               } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+               } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
                        dev_info(&pf->pdev->dev,
                                 "capability discovery failed, err %s aq_err %s\n",
                                 i40e_stat_str(&pf->hw, err),
index 23762a7ef740b261a950e2e454a5a6c0d82d682a..cada4e0e40b48eda0b40a3df56ab7576a4348123 100644 (file)
@@ -1965,7 +1965,6 @@ static void iavf_watchdog_task(struct work_struct *work)
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-               mutex_unlock(&adapter->crit_lock);
                queue_delayed_work(iavf_wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
index 2fb81e359cdfdd4de8128f3d2c7b9b1dfdc97e03..df5ad4de1f00e40ea0ebf3fe087c17ec2f1c135c 100644 (file)
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
        case ICE_DEV_ID_E810C_BACKPLANE:
        case ICE_DEV_ID_E810C_QSFP:
        case ICE_DEV_ID_E810C_SFP:
+       case ICE_DEV_ID_E810_XXV_BACKPLANE:
+       case ICE_DEV_ID_E810_XXV_QSFP:
        case ICE_DEV_ID_E810_XXV_SFP:
                hw->mac_type = ICE_MAC_E810;
                break;
index 9d8194671f6a6abee4d8682dd314ab6d3346208d..ef4392e6e2444871c611affdd389964146ddbdb9 100644 (file)
 #define ICE_DEV_ID_E810C_QSFP          0x1592
 /* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_E810C_SFP           0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE  0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP       0x159A
 /* Intel(R) Ethernet Controller E810-XXV for SFP */
 #define ICE_DEV_ID_E810_XXV_SFP                0x159B
 /* Intel(R) Ethernet Connection E823-C for backplane */
index 14afce82ef6312e80300ea16c2b4eb23479f857f..da7288bdc9a3fe37b6c1c8e4430e14dd0cc6bf57 100644 (file)
@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
 {
        struct ice_hw *hw = &pf->hw;
 
-       snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+       snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+                hw->api_min_ver, hw->api_patch);
 
        return 0;
 }
index 06ac9badee7748e57a99e14a075fec85f1f5e8a6..1ac96dc66d0db86ef5962c58aef0ff10e1679d63 100644 (file)
@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
        for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
                if (hw->tnl.tbl[i].valid &&
                    hw->tnl.tbl[i].type == type &&
-                   idx--)
+                   idx-- == 0)
                        return i;
 
        WARN_ON_ONCE(1);
@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
        u16 index;
 
        tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
-       index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+       index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
 
        status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
        if (status) {
index dde9802c6c72903302e74a9921b6d34576fd16c1..b718e196af2a4a02bf58e0498513f81ffa7b42c5 100644 (file)
@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
  */
 int ice_vsi_release(struct ice_vsi *vsi)
 {
+       enum ice_status err;
        struct ice_pf *pf;
 
        if (!vsi->back)
@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
 
        ice_fltr_remove_all(vsi);
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+       err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+       if (err)
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+                       vsi->vsi_num, err);
        ice_vsi_delete(vsi);
        ice_vsi_free_q_vectors(vsi);
 
@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
        prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+       ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+       if (ret)
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+                       vsi->vsi_num, ret);
        ice_vsi_free_q_vectors(vsi);
 
        /* SR-IOV determines needed MSIX resources all at once instead of per
index 0d6c143f665327793f29efd464bb2058d532d713..06fa93e597fbc6745a46ac30c30cbd61e9b0105f 100644 (file)
@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        if (!pf)
                return -ENOMEM;
 
+       /* initialize Auxiliary index to invalid value */
+       pf->aux_idx = -1;
+
        /* set up for high or low DMA */
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (err)
@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
 
        ice_aq_cancel_waiting_tasks(pf);
        ice_unplug_aux_dev(pf);
-       ida_free(&ice_aux_ida, pf->aux_idx);
+       if (pf->aux_idx >= 0)
+               ida_free(&ice_aux_ida, pf->aux_idx);
        set_bit(ICE_DOWN, pf->state);
 
        mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
index 05cc5870e4efb52b01ef1cf649b4bf318d8e498d..80380aed8882d5681411cd04648a38679877fbd0 100644 (file)
@@ -1313,22 +1313,21 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
 {
        u8 idx;
 
-       spin_lock(&tx->lock);
-
        for (idx = 0; idx < tx->len; idx++) {
                u8 phy_idx = idx + tx->quad_offset;
 
-               /* Clear any potential residual timestamp in the PHY block */
-               if (!pf->hw.reset_ongoing)
-                       ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
-
+               spin_lock(&tx->lock);
                if (tx->tstamps[idx].skb) {
                        dev_kfree_skb_any(tx->tstamps[idx].skb);
                        tx->tstamps[idx].skb = NULL;
                }
-       }
+               clear_bit(idx, tx->in_use);
+               spin_unlock(&tx->lock);
 
-       spin_unlock(&tx->lock);
+               /* Clear any potential residual timestamp in the PHY block */
+               if (!pf->hw.reset_ongoing)
+                       ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+       }
 }
 
 /**
index 9f07b66417059bb0202d0167a527533000c55cb4..2d9b10277186b6c475674db6d6ad31a04c716506 100644 (file)
@@ -2070,6 +2070,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
        return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
 }
 
+/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+       return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
 /**
  * ice_get_agg_info - get the aggregator ID
  * @hw: pointer to the hardware structure
index 9beef8f0ec76089d8644fba2fb32ad26d3280af3..fdf7a5882f076a893085c67978feb4e5b4bf4c99 100644 (file)
@@ -89,6 +89,7 @@ enum ice_status
 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
                  u8 owner, bool enable);
 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
 
 /* Tx scheduler rate limiter functions */
 enum ice_status
index 4461f8b9a864b4bf33e196ea3c731afdafbbac95..4e0203336c6bfe2cc5d8ef848043ac84ec57649b 100644 (file)
@@ -22,8 +22,8 @@
 #define IGC_DEV_ID_I220_V                      0x15F7
 #define IGC_DEV_ID_I225_K                      0x3100
 #define IGC_DEV_ID_I225_K2                     0x3101
+#define IGC_DEV_ID_I226_K                      0x3102
 #define IGC_DEV_ID_I225_LMVP                   0x5502
-#define IGC_DEV_ID_I226_K                      0x5504
 #define IGC_DEV_ID_I225_IT                     0x0D9F
 #define IGC_DEV_ID_I226_LM                     0x125B
 #define IGC_DEV_ID_I226_V                      0x125C
index fc26e4ddeb0dcf48922a1e91bbc5049874e6e5db..beda8e0ef7d421aa08c4521c3ac8cabe30539621 100644 (file)
@@ -3208,7 +3208,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
                max_combined = ixgbe_max_rss_indices(adapter);
        }
 
-       return max_combined;
+       return min_t(int, max_combined, num_online_cpus());
 }
 
 static void ixgbe_get_channels(struct net_device *dev,
index 24e06ba6f5e93de93fe167be3702421f68aadf66..13c4782b920a79f695553e5d3a11442ba74720f0 100644 (file)
@@ -10112,6 +10112,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct bpf_prog *old_prog;
        bool need_reset;
+       int num_queues;
 
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                return -EINVAL;
@@ -10161,11 +10162,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        /* Kick start the NAPI context if there is an AF_XDP socket open
         * on that queue id. This so that receiving will start.
         */
-       if (need_reset && prog)
-               for (i = 0; i < adapter->num_rx_queues; i++)
+       if (need_reset && prog) {
+               num_queues = min_t(int, adapter->num_rx_queues,
+                                  adapter->num_xdp_queues);
+               for (i = 0; i < num_queues; i++)
                        if (adapter->xdp_ring[i]->xsk_pool)
                                (void)ixgbe_xsk_wakeup(adapter->netdev, i,
                                                       XDP_WAKEUP_RX);
+       }
 
        return 0;
 }
index cf97985628ab91d4bf2940adb24cd70b18a34e1f..02e77ffe5c3e4f68e3828482f07c2f2a12fdee68 100644 (file)
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
        int err;
 
+       mlx5_debug_cq_remove(dev, cq);
+
        mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
        mlx5_eq_del_cq(&cq->eq->core, cq);
 
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec_in(dev, destroy_cq, in);
-       if (err)
-               return err;
 
        synchronize_irq(cq->irqn);
 
-       mlx5_debug_cq_remove(dev, cq);
        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);
 
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
index 7b8c8187543a9d3ae897459b6d3c16c5092cffb1..03a7a4ce5cd5ec42ba18a80f642183cd37a15085 100644 (file)
@@ -252,6 +252,7 @@ struct mlx5e_params {
        struct {
                u16 mode;
                u8 num_tc;
+               struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
        } mqprio;
        bool rx_cqe_compress_def;
        bool tunneled_offload_en;
@@ -845,6 +846,7 @@ struct mlx5e_priv {
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
        struct mlx5e_channel_stats trap_stats;
        struct mlx5e_ptp_stats     ptp_stats;
+       u16                        stats_nch;
        u16                        max_nch;
        u8                         max_opened_tc;
        bool                       tx_ptp_opened;
@@ -1100,12 +1102,6 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
                                 struct ethtool_pauseparam *pauseparam);
 
 /* mlx5e generic netdev management API */
-static inline unsigned int
-mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
-{
-       return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-}
-
 static inline bool
 mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 {
@@ -1114,11 +1110,13 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 }
 
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+                   const struct mlx5e_profile *profile,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev);
 void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+                   unsigned int txqs, unsigned int rxqs);
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
index 41684a6c44e9947f619842f665f210b3db6ce753..a88a1a48229f60f670ae455dd58f5d95152f13e0 100644 (file)
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);
index ac44bbe95c5c1b653366252662fddb54e92188fa..d290d7276b8d99aacaa93167085eaac55814535f 100644 (file)
@@ -35,7 +35,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 {
        int ch, i = 0;
 
-       for (ch = 0; ch < priv->max_nch; ch++) {
+       for (ch = 0; ch < priv->stats_nch; ch++) {
                void *buf = data + i;
 
                if (WARN_ON_ONCE(buf +
@@ -51,7 +51,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
 {
        return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
-               priv->max_nch);
+               priv->stats_nch);
 }
 
 static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
@@ -100,7 +100,7 @@ static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
        sagent = &priv->stats_agent;
 
        block->version = MLX5_HV_VHCA_STATS_VERSION;
-       block->rings   = priv->max_nch;
+       block->rings   = priv->stats_nch;
 
        if (!block->command) {
                cancel_delayed_work_sync(&priv->stats_agent.work);
index ee688dec67a99c87bf19022c07a0a930949d8e9c..3a86f66d1295588ac3e77e0e55b110cd619532bb 100644 (file)
@@ -13,8 +13,6 @@ struct mlx5e_ptp_fs {
        bool valid;
 };
 
-#define MLX5E_PTP_CHANNEL_IX 0
-
 struct mlx5e_ptp_params {
        struct mlx5e_params params;
        struct mlx5e_sq_param txq_sq_param;
@@ -509,6 +507,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
        rq->mdev         = mdev;
        rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->stats        = &c->priv->ptp_stats.rq;
+       rq->ix           = MLX5E_PTP_CHANNEL_IX;
        rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
        err = mlx5e_rq_set_handlers(rq, params, false);
        if (err)
index c96668bd701cd8409b727b68d9d98ae4dfe9e3c7..a71a32e00ebb9f4ffd9c3704d0dc2e92e97c7f66 100644 (file)
@@ -8,6 +8,8 @@
 #include "en_stats.h"
 #include <linux/ptp_classify.h>
 
+#define MLX5E_PTP_CHANNEL_IX 0
+
 struct mlx5e_ptpsq {
        struct mlx5e_txqsq       txqsq;
        struct mlx5e_cq          ts_cq;
index b5ddaa82755f170eae6c6a17d70861f9013f23b7..c6d2f8c78db71ab2342cbb368de174831724e102 100644 (file)
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                goto err_alloc_wq;
        }
-       INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
-       queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
-                          msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
 
        br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                         err);
                goto err_register_netdev;
        }
+       INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+       queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+                          msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
        return;
 
 err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
        if (!br_offloads)
                return;
 
+       cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier(&br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
-       cancel_delayed_work(&br_offloads->update_work);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
index b4e98681879470a30f93346cb7dc66680228d9de..4a13ef561587d8797e08e52bdcb5e6cb4d9049a6 100644 (file)
@@ -10,6 +10,8 @@
 #include "en_tc.h"
 #include "rep/tc.h"
 #include "rep/neigh.h"
+#include "lag.h"
+#include "lag_mp.h"
 
 struct mlx5e_tc_tun_route_attr {
        struct net_device *out_dev;
index 33de8f0092a66ce6d8c51987b669ef75b4507565..fb5397324aa4f2b597a1f1b0bcd355836e0c382f 100644 (file)
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
-        * SWP:      OutL3       InL4
-        *           InL3
+        * SWP:      OutL3       OutL4
         * Pkt: MAC  IP     ESP  L4
         *
         * Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                return;
 
        if (!xo->inner_ipproto) {
-               eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-               if (skb->protocol == htons(ETH_P_IPV6))
-                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-               if (xo->proto == IPPROTO_UDP)
+               switch (xo->proto) {
+               case IPPROTO_UDP:
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+                       fallthrough;
+               case IPPROTO_TCP:
+                       /* IP | ESP | TCP */
+                       eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+               switch (xo->inner_ipproto) {
+               case IPPROTO_UDP:
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-               return;
-       }
-
-       /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
-       switch (xo->inner_ipproto) {
-       case IPPROTO_UDP:
-               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-               fallthrough;
-       case IPPROTO_TCP:
-               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-               eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
-               if (skb->protocol == htons(ETH_P_IPV6))
-                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-               break;
-       default:
-               break;
+                       fallthrough;
+               case IPPROTO_TCP:
+                       eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+                       eseg->swp_inner_l4_offset =
+                               (skb->csum_start + skb->head - skb->data) / 2;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+                       break;
+               default:
+                       break;
+               }
        }
 
-       return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
index 306fb5d6a36d3582ad1439cb8fca35830b004e5e..9d451b8ee467c7b41d784093d933cd2b4f82fb05 100644 (file)
@@ -2036,6 +2036,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
        }
 
        new_params = priv->channels.params;
+       /* Don't allow enabling TX-port-TS if MQPRIO mode channel  offload is
+        * active, since it defines explicitly which TC accepts the packet.
+        * This conflicts with TX-port-TS hijacking the PTP traffic to a specific
+        * HW TX-queue.
+        */
+       if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+               netdev_err(priv->netdev,
+                          "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
+                          __func__);
+               return -EINVAL;
+       }
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
        /* No need to verify SQ stop room as
         * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
index c06b4b938ae796807ad0a06a4b1978c0c757991f..d226cc5ab1d168a56692d9dc4824a5725ef7a09d 100644 (file)
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
        struct mlx5e_flow_table *ft;
        int err;
 
-       priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
-       if (!priv->fs.vlan)
-               return -ENOMEM;
-
        ft = &priv->fs.vlan->ft;
        ft->num_groups = 0;
 
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
        ft_attr.prio = MLX5E_NIC_PRIO;
 
        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-       if (IS_ERR(ft->t)) {
-               err = PTR_ERR(ft->t);
-               goto err_free_t;
-       }
+       if (IS_ERR(ft->t))
+               return PTR_ERR(ft->t);
 
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
        kfree(ft->g);
 err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
-err_free_t:
-       kvfree(priv->fs.vlan);
-       priv->fs.vlan = NULL;
 
        return err;
 }
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
-       kvfree(priv->fs.vlan);
 }
 
 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
 }
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+       priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+       if (!priv->fs.vlan)
+               return -ENOMEM;
+       return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+       kvfree(priv->fs.vlan);
+       priv->fs.vlan = NULL;
+}
index 3fd515e7bf3024cf8f2cc15420518a0682d49664..41ef6eb70a5852e5b7df70b6646edace7a11ad9f 100644 (file)
@@ -2264,7 +2264,7 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
 }
 
 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
-                               struct tc_mqprio_qopt_offload *mqprio)
+                               struct netdev_tc_txq *tc_to_txq)
 {
        int tc, err;
 
@@ -2282,11 +2282,8 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
        for (tc = 0; tc < ntc; tc++) {
                u16 count, offset;
 
-               /* For DCB mode, map netdev TCs to offset 0
-                * We have our own UP to TXQ mapping for QoS
-                */
-               count = mqprio ? mqprio->qopt.count[tc] : nch;
-               offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+               count = tc_to_txq[tc].count;
+               offset = tc_to_txq[tc].offset;
                netdev_set_tc_queue(netdev, tc, count, offset);
        }
 
@@ -2315,19 +2312,24 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
 
 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
+       struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
        struct net_device *netdev = priv->netdev;
        int old_num_txqs, old_ntc;
        int num_rxqs, nch, ntc;
        int err;
+       int i;
 
        old_num_txqs = netdev->real_num_tx_queues;
        old_ntc = netdev->num_tc ? : 1;
+       for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+               old_tc_to_txq[i] = netdev->tc_to_txq[i];
 
        nch = priv->channels.params.num_channels;
-       ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+       ntc = priv->channels.params.mqprio.num_tc;
        num_rxqs = nch * priv->profile->rq_groups;
+       tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
 
-       err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+       err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
        if (err)
                goto err_out;
        err = mlx5e_update_tx_netdev_queues(priv);
@@ -2350,11 +2352,14 @@ err_txqs:
        WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
 
 err_tcs:
-       mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+       WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+                                         old_tc_to_txq));
 err_out:
        return err;
 }
 
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
+
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
                                           struct mlx5e_params *params)
 {
@@ -2861,6 +2866,58 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
        return 0;
 }
 
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                                int ntc, int nch)
+{
+       int tc;
+
+       memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+       /* Map netdev TCs to offset 0.
+        * We have our own UP to TXQ mapping for DCB mode of QoS
+        */
+       for (tc = 0; tc < ntc; tc++) {
+               tc_to_txq[tc] = (struct netdev_tc_txq) {
+                       .count = nch,
+                       .offset = 0,
+               };
+       }
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                        struct tc_mqprio_qopt *qopt)
+{
+       int tc;
+
+       for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+               tc_to_txq[tc] = (struct netdev_tc_txq) {
+                       .count = qopt->count[tc],
+                       .offset = qopt->offset[tc],
+               };
+       }
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+       params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+       params->mqprio.num_tc = num_tc;
+       mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+                                            params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+                                           struct tc_mqprio_qopt *qopt)
+{
+       params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+       params->mqprio.num_tc = qopt->num_tc;
+       mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+       mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
                                     struct tc_mqprio_qopt *mqprio)
 {
@@ -2874,8 +2931,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
                return -EINVAL;
 
        new_params = priv->channels.params;
-       new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
-       new_params.mqprio.num_tc = tc ? tc : 1;
+       mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
 
        err = mlx5e_safe_switch_params(priv, &new_params,
                                       mlx5e_num_channels_changed_ctx, NULL, true);
@@ -2889,9 +2945,17 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
 {
        struct net_device *netdev = priv->netdev;
+       struct mlx5e_ptp *ptp_channel;
        int agg_count = 0;
        int i;
 
+       ptp_channel = priv->channels.ptp;
+       if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+               netdev_err(netdev,
+                          "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+               return -EINVAL;
+       }
+
        if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
            mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
                return -EINVAL;
@@ -2917,8 +2981,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
                agg_count += mqprio->qopt.count[i];
        }
 
-       if (priv->channels.params.num_channels < agg_count) {
-               netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+       if (priv->channels.params.num_channels != agg_count) {
+               netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
                           agg_count, priv->channels.params.num_channels);
                return -EINVAL;
        }
@@ -2926,25 +2990,12 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
        return 0;
 }
 
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
-{
-       struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
-       struct net_device *netdev = priv->netdev;
-       u8 num_tc;
-
-       if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
-               return -EINVAL;
-
-       num_tc = priv->channels.params.mqprio.num_tc;
-       mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
-
-       return 0;
-}
-
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
 {
+       mlx5e_fp_preactivate preactivate;
        struct mlx5e_params new_params;
+       bool nch_changed;
        int err;
 
        err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -2952,12 +3003,12 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                return err;
 
        new_params = priv->channels.params;
-       new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-       new_params.mqprio.num_tc = mqprio->qopt.num_tc;
-       err = mlx5e_safe_switch_params(priv, &new_params,
-                                      mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+       mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
 
-       return err;
+       nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+       preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+               mlx5e_update_netdev_queues_ctx;
+       return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3065,7 +3116,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 {
        int i;
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
                struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -3274,20 +3325,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
        return mlx5_set_port_fcs(mdev, !enable);
 }
 
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+       u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+       bool supported, curr_state;
+       int err;
+
+       if (!MLX5_CAP_GEN(mdev, ports_check))
+               return 0;
+
+       err = mlx5_query_ports_check(mdev, in, sizeof(in));
+       if (err)
+               return err;
+
+       supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+       curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+       if (!supported || enable == curr_state)
+               return 0;
+
+       MLX5_SET(pcmr_reg, in, local_port, 1);
+       MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+       return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_channels *chs = &priv->channels;
+       struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
        mutex_lock(&priv->state_lock);
 
-       priv->channels.params.scatter_fcs_en = enable;
-       err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
-       if (err)
-               priv->channels.params.scatter_fcs_en = !enable;
+       if (enable) {
+               err = mlx5e_set_rx_port_ts(mdev, false);
+               if (err)
+                       goto out;
 
-       mutex_unlock(&priv->state_lock);
+               chs->params.scatter_fcs_en = true;
+               err = mlx5e_modify_channels_scatter_fcs(chs, true);
+               if (err) {
+                       chs->params.scatter_fcs_en = false;
+                       mlx5e_set_rx_port_ts(mdev, true);
+               }
+       } else {
+               chs->params.scatter_fcs_en = false;
+               err = mlx5e_modify_channels_scatter_fcs(chs, false);
+               if (err) {
+                       chs->params.scatter_fcs_en = true;
+                       goto out;
+               }
+               err = mlx5e_set_rx_port_ts(mdev, true);
+               if (err) {
+                       mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+                       err = 0;
+               }
+       }
 
+out:
+       mutex_unlock(&priv->state_lock);
        return err;
 }
 
@@ -4186,13 +4284,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 rx_cq_period_mode;
 
-       priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
        params->sw_mtu = mtu;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
                                     priv->max_nch);
-       params->mqprio.num_tc = 1;
+       mlx5e_params_mqprio_reset(params);
 
        /* Set an initial non-zero value, so that mlx5e_select_queue won't
         * divide by zero if called before first activating channels.
@@ -4482,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
        mlx5e_timestamp_init(priv);
 
+       err = mlx5e_fs_init(priv);
+       if (err) {
+               mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+               return err;
+       }
+
        err = mlx5e_ipsec_init(priv);
        if (err)
                mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4499,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
        mlx5e_health_destroy_reporters(priv);
        mlx5e_tls_cleanup(priv);
        mlx5e_ipsec_cleanup(priv);
+       mlx5e_fs_cleanup(priv);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4682,8 +4785,35 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
        .rx_ptp_support    = true,
 };
 
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+                  const struct mlx5e_profile *profile)
+
+{
+       unsigned int max_nch, tmp;
+
+       /* core resources */
+       max_nch = mlx5e_get_max_num_channels(mdev);
+
+       /* netdev rx queues */
+       tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+       max_nch = min_t(unsigned int, max_nch, tmp);
+
+       /* netdev tx queues */
+       tmp = netdev->num_tx_queues;
+       if (mlx5_qos_is_supported(mdev))
+               tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+       if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+               tmp -= profile->max_tc;
+       tmp = tmp / profile->max_tc;
+       max_nch = min_t(unsigned int, max_nch, tmp);
+
+       return max_nch;
+}
+
 /* mlx5e generic netdev management API (move to en_common.c) */
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+                   const struct mlx5e_profile *profile,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev)
 {
@@ -4691,6 +4821,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
        priv->mdev        = mdev;
        priv->netdev      = netdev;
        priv->msglevel    = MLX5E_MSG_LEVEL;
+       priv->max_nch     = mlx5e_calc_max_nch(mdev, netdev, profile);
+       priv->stats_nch   = priv->max_nch;
        priv->max_opened_tc = 1;
 
        if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -4734,7 +4866,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 }
 
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+                   unsigned int txqs, unsigned int rxqs)
 {
        struct net_device *netdev;
        int err;
@@ -4745,7 +4878,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int
                return NULL;
        }
 
-       err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+       err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
                goto err_free_netdev;
@@ -4787,7 +4920,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
        clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
        /* max number of channels may have changed */
-       max_nch = mlx5e_get_max_num_channels(priv->mdev);
+       max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
        if (priv->channels.params.num_channels > max_nch) {
                mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
                /* Reducing the number of channels - RXFH has to be reset, and
@@ -4795,7 +4928,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
                 */
                priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
                priv->channels.params.num_channels = max_nch;
+               if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+                       mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+                       mlx5e_params_mqprio_reset(&priv->channels.params);
+               }
+       }
+       if (max_nch != priv->max_nch) {
+               mlx5_core_warn(priv->mdev,
+                              "MLX5E: Updating max number of channels from %u to %u\n",
+                              priv->max_nch, max_nch);
+               priv->max_nch = max_nch;
        }
+
        /* 1. Set the real number of queues in the kernel the first time.
         * 2. Set our default XPS cpumask.
         * 3. Build the RQT.
@@ -4860,7 +5004,7 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
-       err = mlx5e_priv_init(priv, netdev, mdev);
+       err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
                return err;
@@ -4886,20 +5030,12 @@ priv_cleanup:
 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
                                const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-       unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
        const struct mlx5e_profile *orig_profile = priv->profile;
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *orig_ppriv = priv->ppriv;
        int err, rollback_err;
 
-       /* sanity */
-       if (new_max_nch != priv->max_nch) {
-               netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
-                           __func__);
-               return -EINVAL;
-       }
-
        /* cleanup old profile */
        mlx5e_detach_netdev(priv);
        priv->profile->cleanup(priv);
@@ -4995,7 +5131,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
        nch = mlx5e_get_max_num_channels(mdev);
        txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
        rxqs = nch * profile->rq_groups;
-       netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+       netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
        if (!netdev) {
                mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
                return -ENOMEM;
index ae71a17fdb277bfed76ee9041a1c0fe2607f4e88..0684ac6699b2de8f832e6a7f29df1f31d94227f5 100644 (file)
@@ -596,7 +596,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-       priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
        params = &priv->channels.params;
 
        params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -619,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        params->mqprio.num_tc       = 1;
        params->tunneled_offload_en = false;
 
+       /* Set an initial non-zero value, so that mlx5e_select_queue won't
+        * divide by zero if called before first activating channels.
+        */
+       priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
@@ -644,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
        netdev->hw_features    |= NETIF_F_RXCSUM;
 
        netdev->features |= netdev->hw_features;
-       netdev->features |= NETIF_F_VLAN_CHALLENGED;
        netdev->features |= NETIF_F_NETNS_LOCAL;
 }
 
@@ -1169,7 +1172,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
        nch = mlx5e_get_max_num_channels(dev);
        txqs = nch * profile->max_tc;
        rxqs = nch * profile->rq_groups;
-       netdev = mlx5e_create_netdev(dev, txqs, rxqs);
+       netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
        if (!netdev) {
                mlx5_core_warn(dev,
                               "Failed to create representor netdev for vport %d\n",
index 3c65fd0bcf31c56f67f41a2d65a91902776e7be2..29a6586ef28dc12848a4625e147b1f252c8cddab 100644 (file)
@@ -1001,14 +1001,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                goto csum_unnecessary;
 
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-               u8 ipproto = get_ip_proto(skb, network_depth, proto);
-
-               if (unlikely(ipproto == IPPROTO_SCTP))
+               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
 
-               if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
-                       goto csum_none;
-
                stats->csum_complete++;
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
index e4f5b63951482462f0842b18d684821e56ede381..e1dd17019030e6c339592095a0f10f860aba7216 100644 (file)
@@ -34,6 +34,7 @@
 #include "en.h"
 #include "en_accel/tls.h"
 #include "en_accel/en_accel.h"
+#include "en/ptp.h"
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
@@ -450,7 +451,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 
        memset(s, 0, sizeof(*s));
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                int j;
@@ -2076,7 +2077,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
        if (priv->rx_ptp_opened) {
                for (i = 0; i < NUM_PTP_RQ_STATS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                               ptp_rq_stats_desc[i].format);
+                               ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
        }
        return idx;
 }
@@ -2119,7 +2120,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 {
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
 
        return (NUM_RQ_STATS * max_nch) +
               (NUM_CH_STATS * max_nch) +
@@ -2133,7 +2134,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 {
        bool is_xsk = priv->xsk.ever_used;
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
        int i, j, tc;
 
        for (i = 0; i < max_nch; i++)
@@ -2175,7 +2176,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
 {
        bool is_xsk = priv->xsk.ever_used;
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
        int i, j, tc;
 
        for (i = 0; i < max_nch; i++)
index ba81647920167d9152fee996ddcd47a9872d62ab..129ff7e0d65cc4b04f1eb8ae96037958ce5cb80b 100644 (file)
@@ -67,6 +67,8 @@
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
 #include <asm/div64.h>
+#include "lag.h"
+#include "lag_mp.h"
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
index c63d78eda6060febc892bfb0a661f2c22603a7d1..188994d091c54fd153e8f5574cd3d3224a4b5132 100644 (file)
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5_wqe_eth_seg *eseg)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
+
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-       if (skb->encapsulation) {
-               eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+       if (xo->inner_ipproto) {
+               eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+       } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+               eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial_inner++;
-       } else {
-               sq->stats->csum_partial++;
        }
 }
 
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5e_accel_tx_state *accel,
                            struct mlx5_wqe_eth_seg *eseg)
 {
+       if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+               return;
+       }
+
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial++;
 #endif
-       } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
-               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
        } else
                sq->stats->csum_none++;
 }
index 0399a396d1662d7b553141cbb3f2646701ab0ebf..60a73990017c2a3683d112e0fc03a746276b2bbe 100644 (file)
@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
        int dest_num = 0;
        int err = 0;
 
-       if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+       if (vport->egress.legacy.drop_counter) {
+               drop_counter = vport->egress.legacy.drop_counter;
+       } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
                drop_counter = mlx5_fc_create(esw->dev, false);
-               if (IS_ERR(drop_counter))
+               if (IS_ERR(drop_counter)) {
                        esw_warn(esw->dev,
                                 "vport[%d] configure egress drop rule counter err(%ld)\n",
                                 vport->vport, PTR_ERR(drop_counter));
+                       drop_counter = NULL;
+               }
                vport->egress.legacy.drop_counter = drop_counter;
        }
 
@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 
        /* Attach egress drop flow counter */
-       if (!IS_ERR_OR_NULL(drop_counter)) {
+       if (drop_counter) {
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
        esw_acl_egress_table_destroy(vport);
 
 clean_drop_counter:
-       if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+       if (vport->egress.legacy.drop_counter) {
                mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
                vport->egress.legacy.drop_counter = NULL;
        }
index f75b86abaf1cdf5d0bbd0a79fe1de296c1d3cbf5..b1a5199260f69627a151785eebcbad361d8100f3 100644 (file)
@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 
        esw_acl_ingress_lgcy_rules_destroy(vport);
 
-       if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+       if (vport->ingress.legacy.drop_counter) {
+               counter = vport->ingress.legacy.drop_counter;
+       } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
                counter = mlx5_fc_create(esw->dev, false);
                if (IS_ERR(counter)) {
                        esw_warn(esw->dev,
index 985e305179d1d440515c0ff8d4fbe11a665b1caa..c6cc67cb4f6add88e0b1f24f00c6112fd8c521d1 100644 (file)
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
 
 err_min_rate:
        list_del(&group->list);
-       err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                 group->tsar_ix);
-       if (err)
+       if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+                                               SCHEDULING_HIERARCHY_E_SWITCH,
+                                               group->tsar_ix))
                NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
 err_sched_elem:
        kfree(group);
index 67571e5040d68cf53df14de1c69dc72727e57c3c..269ebb53eda6760f9ecf50206b3ecd4ea818862b 100644 (file)
@@ -113,7 +113,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
        struct mlx5e_sw_stats s = { 0 };
        int i, j;
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;
 
@@ -711,7 +711,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                        goto destroy_ht;
        }
 
-       err = mlx5e_priv_init(epriv, netdev, mdev);
+       err = mlx5e_priv_init(epriv, prof, netdev, mdev);
        if (err)
                goto destroy_mdev_resources;
 
index ca5690b0a7abbf3628e7d56d96f9d9ee6233099a..d2105c1635c34f929f3323e5026482fbd562bf95 100644 (file)
@@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
        if (!mlx5_lag_is_ready(ldev)) {
                do_bond = false;
        } else {
+               /* VF LAG is in multipath mode, ignore bond change requests */
+               if (mlx5_lag_is_multipath(dev0))
+                       return;
+
                tracker = ldev->tracker;
 
                do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
index f239b352a58a1859a1cbfb7c2edb1e51dd361637..21fdaf708f1fe80acaa95acba0d754c92c262c06 100644 (file)
@@ -9,20 +9,23 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+       return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
        if (!mlx5_lag_is_ready(ldev))
                return false;
 
+       if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+               return false;
+
        return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
                                         ldev->pf[MLX5_LAG_P2].dev);
 }
 
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
-       return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
 bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
index 729c839397a89f65e86d17df17186536288d0d68..dea199e79beda53c150de12e921d5554756d7e3c 100644 (file)
@@ -24,12 +24,14 @@ struct lag_mp {
 void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
 int mlx5_lag_mp_init(struct mlx5_lag *ldev);
 void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
 static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
 static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
 static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
 
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_LAG_MP_H__ */
index ffac8a0e7a2321659976e9dd1714f87617f8662e..91e806c1aa21126abe7bcdc9e8817a8efd1bc79c 100644 (file)
@@ -448,22 +448,20 @@ static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
        return cycles_now + cycles_delta;
 }
 
-static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
-                                     s64 sec, u32 nsec)
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
 {
-       struct timespec64 ts;
+       struct timespec64 ts = {};
        s64 target_ns;
 
        ts.tv_sec = sec;
-       ts.tv_nsec = nsec;
        target_ns = timespec64_to_ns(&ts);
 
        return find_target_cycles(mdev, target_ns);
 }
 
-static u64 perout_conf_real_time(s64 sec, u32 nsec)
+static u64 perout_conf_real_time(s64 sec)
 {
-       return (u64)nsec | (u64)sec << 32;
+       return (u64)sec << 32;
 }
 
 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,6 +472,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
+       bool rt_mode = mlx5_real_time_mode(mdev);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        struct timespec64 ts;
        u32 field_select = 0;
@@ -501,8 +500,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 
        if (on) {
                bool rt_mode = mlx5_real_time_mode(mdev);
-               u32 nsec;
-               s64 sec;
+               s64 sec = rq->perout.start.sec;
+
+               if (rq->perout.start.nsec)
+                       return -EINVAL;
 
                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -513,14 +514,11 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;
 
-               nsec = rq->perout.start.nsec;
-               sec = rq->perout.start.sec;
-
                if (rt_mode && sec > U32_MAX)
                        return -EINVAL;
 
-               time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
-                                      perout_conf_internal_timer(mdev, sec, nsec);
+               time_stamp = rt_mode ? perout_conf_real_time(sec) :
+                                      perout_conf_internal_timer(mdev, sec);
 
                field_select |= MLX5_MTPPS_FS_PIN_MODE |
                                MLX5_MTPPS_FS_PATTERN |
@@ -538,6 +536,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
        if (err)
                return err;
 
+       if (rt_mode)
+               return 0;
+
        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
 }
@@ -705,20 +706,14 @@ static void ts_next_sec(struct timespec64 *ts)
 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
                                        struct mlx5_clock *clock)
 {
-       bool rt_mode = mlx5_real_time_mode(mdev);
        struct timespec64 ts;
        s64 target_ns;
 
-       if (rt_mode)
-               ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
-       else
-               mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
-
+       mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
        ts_next_sec(&ts);
        target_ns = timespec64_to_ns(&ts);
 
-       return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
-                        find_target_cycles(mdev, target_ns);
+       return find_target_cycles(mdev, target_ns);
 }
 
 static int mlx5_pps_event(struct notifier_block *nb,
index c79a10b3454d48244d5f43cd012fd4d31452d420..763c83a0238091ef55b7e13d78bc4dc41212be60 100644 (file)
@@ -13,8 +13,8 @@
 #endif
 
 #define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 255. three chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (3)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
 
 #define MLX5_SFS_PER_CTRL_IRQ 64
 #define MLX5_IRQ_CTRL_SF_MAX 8
@@ -633,8 +633,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
        if (table->sf_comp_pool)
-               return table->sf_comp_pool->xa_num_irqs.max -
-                       table->sf_comp_pool->xa_num_irqs.min + 1;
+               return min_t(int, num_online_cpus(),
+                            table->sf_comp_pool->xa_num_irqs.max -
+                            table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
 }
index 0998dcc9cac04688bbefc092973543a7fb7493e5..b29824448aa858d078878af2ebb366c26611e3dc 100644 (file)
 #define MLXSW_THERMAL_ZONE_MAX_NAME    16
 #define MLXSW_THERMAL_TEMP_SCORE_MAX   GENMASK(31, 0)
 #define MLXSW_THERMAL_MAX_STATE        10
+#define MLXSW_THERMAL_MIN_STATE        2
 #define MLXSW_THERMAL_MAX_DUTY 255
-/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
- * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting fan speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXSW_THERMAL_SPEED_MIN                (MLXSW_THERMAL_MAX_STATE + 2)
-#define MLXSW_THERMAL_SPEED_MAX                (MLXSW_THERMAL_MAX_STATE * 2)
-#define MLXSW_THERMAL_SPEED_MIN_LEVEL  2               /* 20% */
 
 /* External cooling devices, allowed for binding to mlxsw thermal zones. */
 static char * const mlxsw_thermal_external_allowed_cdev[] = {
@@ -646,49 +638,16 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
        struct mlxsw_thermal *thermal = cdev->devdata;
        struct device *dev = thermal->bus_info->dev;
        char mfsc_pl[MLXSW_REG_MFSC_LEN];
-       unsigned long cur_state, i;
        int idx;
-       u8 duty;
        int err;
 
+       if (state > MLXSW_THERMAL_MAX_STATE)
+               return -EINVAL;
+
        idx = mlxsw_get_cooling_device_idx(thermal, cdev);
        if (idx < 0)
                return idx;
 
-       /* Verify if this request is for changing allowed fan dynamical
-        * minimum. If it is - update cooling levels accordingly and update
-        * state, if current state is below the newly requested minimum state.
-        * For example, if current state is 5, and minimal state is to be
-        * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
-        * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
-        * overwritten.
-        */
-       if (state >= MLXSW_THERMAL_SPEED_MIN &&
-           state <= MLXSW_THERMAL_SPEED_MAX) {
-               state -= MLXSW_THERMAL_MAX_STATE;
-               for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
-                       thermal->cooling_levels[i] = max(state, i);
-
-               mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
-               err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
-               if (err)
-                       return err;
-
-               duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
-               cur_state = mlxsw_duty_to_state(duty);
-
-               /* If current fan state is lower than requested dynamical
-                * minimum, increase fan speed up to dynamical minimum.
-                */
-               if (state < cur_state)
-                       return 0;
-
-               state = cur_state;
-       }
-
-       if (state > MLXSW_THERMAL_MAX_STATE)
-               return -EINVAL;
-
        /* Normalize the state to the valid speed range. */
        state = thermal->cooling_levels[state];
        mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
@@ -998,8 +957,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
 
        /* Initialize cooling levels per PWM state. */
        for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
-               thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
-                                                i);
+               thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
 
        thermal->polling_delay = bus_info->low_frequency ?
                                 MLXSW_THERMAL_SLOW_POLL_INT :
index 5cc00d22c708c46f37afbea48830223b53237cb9..6ecc4eb30e74b80582abde169a21afbacc577705 100644 (file)
@@ -4,8 +4,6 @@
 #
 
 obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
 obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
index 3f69bb59ba49a8fb1b5e091261e26bffdbbeffdc..a6db1a8156e1a8ebf2b31ca26b9946ade628ec11 100644 (file)
@@ -1057,6 +1057,7 @@ int ks8851_suspend(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_suspend);
 
 int ks8851_resume(struct device *dev)
 {
@@ -1070,6 +1071,7 @@ int ks8851_resume(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_resume);
 #endif
 
 static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
@@ -1243,6 +1245,7 @@ err_reg:
 err_reg_io:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ks8851_probe_common);
 
 int ks8851_remove_common(struct device *dev)
 {
@@ -1261,3 +1264,8 @@ int ks8851_remove_common(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_remove_common);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
index 796e46a5392699c932d1d9b94c09bd5749ad6b18..81a8ccca7e5e075de4e1c4d5dfc21cab6874a686 100644 (file)
@@ -497,13 +497,19 @@ static struct regmap_bus phymap_encx24j600 = {
        .reg_read = regmap_encx24j600_phy_reg_read,
 };
 
-void devm_regmap_init_encx24j600(struct device *dev,
-                                struct encx24j600_context *ctx)
+int devm_regmap_init_encx24j600(struct device *dev,
+                               struct encx24j600_context *ctx)
 {
        mutex_init(&ctx->mutex);
        regcfg.lock_arg = ctx;
        ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+       if (IS_ERR(ctx->regmap))
+               return PTR_ERR(ctx->regmap);
        ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+       if (IS_ERR(ctx->phymap))
+               return PTR_ERR(ctx->phymap);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
 
index ee921a99e439a23f42b27f34cca6a01a64b99957..0bc6b3176fbf0a5c4703c6a4b4b36e78f48be4b2 100644 (file)
@@ -1023,10 +1023,13 @@ static int encx24j600_spi_probe(struct spi_device *spi)
        priv->speed = SPEED_100;
 
        priv->ctx.spi = spi;
-       devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
        ndev->irq = spi->irq;
        ndev->netdev_ops = &encx24j600_netdev_ops;
 
+       ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+       if (ret)
+               goto out_free;
+
        mutex_init(&priv->lock);
 
        /* Reset device and check if it is connected */
index fac61a8fbd0205feef4e1ed3608b733fcad12722..34c5a289898c925c4cb477afb1d1a773796e2131 100644 (file)
@@ -15,8 +15,8 @@ struct encx24j600_context {
        int bank;
 };
 
-void devm_regmap_init_encx24j600(struct device *dev,
-                                struct encx24j600_context *ctx);
+int devm_regmap_init_encx24j600(struct device *dev,
+                               struct encx24j600_context *ctx);
 
 /* Single-byte instructions */
 #define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
index cbece6e9bff27a4dff5f1f71e46e6467d826e8c7..5030dfca38798d28c04da06e518171e3bf818d07 100644 (file)
@@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
                        err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
                                            "port %u: missing serdes\n",
                                            portno);
+                       of_node_put(portnp);
                        goto cleanup_config;
                }
                config->portno = portno;
index 1b21030308e57c6004c299eb6e76de5a822fd8d6..030ae89f3a337abb52568185b494c15945e309f5 100644 (file)
@@ -1477,8 +1477,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
        if (err)
                goto out;
 
-       if (cq->gdma_id >= gc->max_num_cqs)
+       if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+               err = -EINVAL;
                goto out;
+       }
 
        gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
index 559177e6ded40754b138d0bc1e5fe425972a737c..a08e4f530c1c1186a55d8a1392a3786792c11396 100644 (file)
@@ -472,9 +472,9 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
            !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
                ocelot_port_rmwl(ocelot_port,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG);
 }
 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
@@ -569,49 +569,44 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
 
-static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
-                                        struct sk_buff *clone)
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+                                       struct sk_buff *clone)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       unsigned long flags;
+
+       spin_lock_irqsave(&ocelot->ts_id_lock, flags);
 
-       spin_lock(&ocelot_port->ts_id_lock);
+       if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+           ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+               spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+               return -EBUSY;
+       }
 
        skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
        /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
        OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
-       ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
-       skb_queue_tail(&ocelot_port->tx_skbs, clone);
 
-       spin_unlock(&ocelot_port->ts_id_lock);
-}
+       ocelot_port->ts_id++;
+       if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+               ocelot_port->ts_id = 0;
 
-u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
-       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
-       u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
-       u32 rew_op = 0;
+       ocelot_port->ptp_skbs_in_flight++;
+       ocelot->ptp_skbs_in_flight++;
 
-       if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
-               rew_op = ptp_cmd;
-               rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
-       } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
-               rew_op = ptp_cmd;
-       }
+       skb_queue_tail(&ocelot_port->tx_skbs, clone);
 
-       return rew_op;
+       spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+       return 0;
 }
-EXPORT_SYMBOL(ocelot_ptp_rew_op);
 
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+                                      unsigned int ptp_class)
 {
        struct ptp_header *hdr;
-       unsigned int ptp_class;
        u8 msgtype, twostep;
 
-       ptp_class = ptp_classify_raw(skb);
-       if (ptp_class == PTP_CLASS_NONE)
-               return false;
-
        hdr = ptp_parse_header(skb, ptp_class);
        if (!hdr)
                return false;
@@ -631,10 +626,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
        u8 ptp_cmd = ocelot_port->ptp_cmd;
+       unsigned int ptp_class;
+       int err;
+
+       /* Don't do anything if PTP timestamping not enabled */
+       if (!ptp_cmd)
+               return 0;
+
+       ptp_class = ptp_classify_raw(skb);
+       if (ptp_class == PTP_CLASS_NONE)
+               return -EINVAL;
 
        /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
        if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
-               if (ocelot_ptp_is_onestep_sync(skb)) {
+               if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
                        OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
                        return 0;
                }
@@ -648,8 +653,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
                if (!(*clone))
                        return -ENOMEM;
 
-               ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+               err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+               if (err)
+                       return err;
+
                OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+               OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
        }
 
        return 0;
@@ -683,6 +692,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
        spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
 }
 
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+       struct ptp_header *hdr;
+
+       hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+       if (WARN_ON(!hdr))
+               return false;
+
+       return seqid == ntohs(hdr->sequence_id);
+}
+
 void ocelot_get_txtstamp(struct ocelot *ocelot)
 {
        int budget = OCELOT_PTP_QUEUE_SZ;
@@ -690,10 +710,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
        while (budget--) {
                struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
                struct skb_shared_hwtstamps shhwtstamps;
+               u32 val, id, seqid, txport;
                struct ocelot_port *port;
                struct timespec64 ts;
                unsigned long flags;
-               u32 val, id, txport;
 
                val = ocelot_read(ocelot, SYS_PTP_STATUS);
 
@@ -706,10 +726,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
                /* Retrieve the ts ID and Tx port */
                id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
                txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+               seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
 
-               /* Retrieve its associated skb */
                port = ocelot->ports[txport];
 
+               spin_lock(&ocelot->ts_id_lock);
+               port->ptp_skbs_in_flight--;
+               ocelot->ptp_skbs_in_flight--;
+               spin_unlock(&ocelot->ts_id_lock);
+
+               /* Retrieve its associated skb */
+try_again:
                spin_lock_irqsave(&port->tx_skbs.lock, flags);
 
                skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
@@ -722,12 +749,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 
                spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
 
+               if (WARN_ON(!skb_match))
+                       continue;
+
+               if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+                       dev_err_ratelimited(ocelot->dev,
+                                           "port %d received stale TX timestamp for seqid %d, discarding\n",
+                                           txport, seqid);
+                       dev_kfree_skb_any(skb);
+                       goto try_again;
+               }
+
                /* Get the h/w timestamp */
                ocelot_get_hwtimestamp(ocelot, &ts);
 
-               if (unlikely(!skb_match))
-                       continue;
-
                /* Set the timestamp into the skb */
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -1948,7 +1983,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
        struct ocelot_port *ocelot_port = ocelot->ports[port];
 
        skb_queue_head_init(&ocelot_port->tx_skbs);
-       spin_lock_init(&ocelot_port->ts_id_lock);
 
        /* Basic L2 initialization */
 
@@ -2081,6 +2115,7 @@ int ocelot_init(struct ocelot *ocelot)
        mutex_init(&ocelot->stats_lock);
        mutex_init(&ocelot->ptp_lock);
        spin_lock_init(&ocelot->ptp_clock_lock);
+       spin_lock_init(&ocelot->ts_id_lock);
        snprintf(queue_name, sizeof(queue_name), "%s-stats",
                 dev_name(ocelot->dev));
        ocelot->stats_queue = create_singlethread_workqueue(queue_name);
index e54b9fb2a97a60f0653a5a1c425e58780913462f..2545727fd5b2f31288683a8648629f4fb7cb9e3f 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2020-2021 NXP
  */
 
+#include <linux/dsa/ocelot.h>
 #include <linux/if_bridge.h>
 #include <linux/of_net.h>
 #include <linux/phy/phy.h>
@@ -1625,7 +1626,7 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
        if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
                ocelot_port_rmwl(ocelot_port, 0,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG);
 
        ocelot_port->phy_mode = phy_mode;
index 7945393a06557d972bf8600d6181eada7cfd4892..99d7376a70a748d50d123ace7797a722b5e7f372 100644 (file)
@@ -998,8 +998,8 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
 }
 
 struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
-                                   bool tc_offload)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+                                   unsigned long cookie, bool tc_offload)
 {
        struct ocelot_vcap_filter *filter;
 
index 291ae6817c2607beefc79a6d90e98205818d13fe..d51f799e4e86131500486cfd2731f9a2d7d11ac6 100644 (file)
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
                target = ocelot_regmap_init(ocelot, res);
                if (IS_ERR(target)) {
                        err = PTR_ERR(target);
+                       of_node_put(portnp);
                        goto out_teardown;
                }
 
index 09c0e839cca5822a4a9ea88774da5abb69672f6d..3b6b2e61139e65df9a719b49768c9d3bb8e36ccb 100644 (file)
@@ -8566,7 +8566,7 @@ static void s2io_io_resume(struct pci_dev *pdev)
                        return;
                }
 
-               if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+               if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
                        s2io_card_down(sp);
                        pr_err("Can't restore mac addr after reset.\n");
                        return;
index c029950a81e202230ea8b8b4e427bf018643c258..ac1dcfa1d17908fae345db8555de8919fb40db14 100644 (file)
@@ -830,10 +830,6 @@ static int nfp_flower_init(struct nfp_app *app)
        if (err)
                goto err_cleanup;
 
-       err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
-       if (err)
-               goto err_cleanup;
-
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_init(app);
 
@@ -942,7 +938,20 @@ static int nfp_flower_start(struct nfp_app *app)
                        return err;
        }
 
-       return nfp_tunnel_config_start(app);
+       err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+       if (err)
+               return err;
+
+       err = nfp_tunnel_config_start(app);
+       if (err)
+               goto err_tunnel_config;
+
+       return 0;
+
+err_tunnel_config:
+       flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+                                nfp_flower_setup_indr_tc_release);
+       return err;
 }
 
 static void nfp_flower_stop(struct nfp_app *app)
index 2643ea5948f48f0d94fb5b83a6ce364b1932c3c1..154399c5453febe4b993f6b546f027e8b47e7ad4 100644 (file)
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
        }
 
        reg->dst_lmextn = swreg_lmextn(dst);
-       reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+       reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
 
        return 0;
 }
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
        }
 
        reg->dst_lmextn = swreg_lmextn(dst);
-       reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+       reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
 
        return 0;
 }
index 381966e8f55722188f96ab9e4290d3f57edab55b..7f3322ce044c7c5d91cffd6e2002daf1a9c26e5b 100644 (file)
@@ -1292,8 +1292,10 @@ int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
        if (err && err != -EEXIST) {
                /* set the state back to NEW so we can try again later */
                f = ionic_rx_filter_by_addr(lif, addr);
-               if (f && f->state == IONIC_FILTER_STATE_SYNCED)
+               if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
                        f->state = IONIC_FILTER_STATE_NEW;
+                       set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+               }
 
                spin_unlock_bh(&lif->rx_filters.lock);
 
@@ -1377,6 +1379,10 @@ static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
 
 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
 {
+       /* Don't delete our own address from the uc list */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
 }
 
index 25ecfcfa1281fd12256cd468e16c5fcdbe816289..69728f9013cbbfa2049973db8f9cc00b28be248b 100644 (file)
@@ -349,9 +349,6 @@ loop_out:
        list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
                (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
 
-               if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED)
-                       set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
-
                list_del(&sync_item->list);
                devm_kfree(dev, sync_item);
        }
index 58a854666c62b1d2a424df87442c4c57f0709c88..c14de5fcedea37c474e07e38b2ae33953ced0307 100644 (file)
@@ -380,15 +380,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
                                          &ionic_dbg_intr_stats_desc[i]);
                (*buf)++;
        }
-       for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
-               **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
-                                         &ionic_dbg_napi_stats_desc[i]);
-               (*buf)++;
-       }
-       for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
-               **buf = txqcq->napi_stats.work_done_cntr[i];
-               (*buf)++;
-       }
        for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
                **buf = txstats->sg_cntr[i];
                (*buf)++;
index 15ef59aa34ff56fbec1f31cbb21db3e44bd408a7..d10e1cd6d2ba96b1a75575f468c3a21ad1625d73 100644 (file)
@@ -1299,6 +1299,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
+                               rc = -EINVAL;
                                goto err;
                        }
                }
index 4bd3ef8f3384eb2afef04622518540385b096350..c4fe3c48ac46ab4f4bae0081b52a63e10acc9f66 100644 (file)
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                SET_BIT(FIBRE);
-               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
                        SET_BIT(1000baseT_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-                       SET_BIT(10000baseT_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       SET_BIT(1000baseX_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+                       SET_BIT(10000baseCR_Full);
+                       SET_BIT(10000baseLR_Full);
+                       SET_BIT(10000baseSR_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
                        SET_BIT(40000baseCR4_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+                       SET_BIT(40000baseSR4_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
                        SET_BIT(100000baseCR4_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+                       SET_BIT(100000baseSR4_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
                        SET_BIT(25000baseCR_Full);
+                       SET_BIT(25000baseSR_Full);
+               }
                if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
                        SET_BIT(50000baseCR2_Full);
                break;
@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
                result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
        if (TEST_BIT(1000baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
-       if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+       if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+                       TEST_BIT(1000baseX_Full))
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
-       if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+       if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+                       TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+                       TEST_BIT(10000baseSR_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
-       if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+       if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+                       TEST_BIT(40000baseSR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
-       if (TEST_BIT(100000baseCR4_Full))
+       if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
-       if (TEST_BIT(25000baseCR_Full))
+       if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
                result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
        if (TEST_BIT(50000baseCR2_Full))
                result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
index a39c5143b3864d7dacfc81af09d860032651b337..797e51802ccbb5bfd6add5eeb2d9b164eed63a3a 100644 (file)
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
        } else if (rc == -EINVAL) {
                fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
        } else if (rc == -EPERM) {
-               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+               pci_info(efx->pci_dev, "no PTP support\n");
                return rc;
        } else {
                efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
         * should only have been called during probe.
         */
        if (rc == -ENOSYS || rc == -EPERM)
-               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+               pci_info(efx->pci_dev, "no PTP support\n");
        else if (rc)
                efx_mcdi_display_error(efx, MC_CMD_PTP,
                                       MC_CMD_PTP_IN_DISABLE_LEN,
index 83dcfcae3d4b543e669eb69254383423662d57aa..441e7f3e5375155ce033f5fa26a2241e289af291 100644 (file)
@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
                return;
 
        if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
-               netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
+               pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
                return;
        }
        if (count > 0 && count > max_vfs)
index fbfda55b4c5263112070323a97f7baf39c516855..5e731a72cce81a4c2d50dbece319465588e3df70 100644 (file)
@@ -71,6 +71,7 @@ err_remove_config_dt:
 
 static const struct of_device_id dwmac_generic_match[] = {
        { .compatible = "st,spear600-gmac"},
+       { .compatible = "snps,dwmac-3.40a"},
        { .compatible = "snps,dwmac-3.50a"},
        { .compatible = "snps,dwmac-3.610"},
        { .compatible = "snps,dwmac-3.70a"},
index ed817011a94a09e68d353344756e322c84c1eb2c..6924a6aacbd53c8310a1476b82d5978f87094b5b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/pm_runtime.h>
 
 #include "stmmac_platform.h"
 
@@ -1528,6 +1529,8 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
        }
 
+       pm_runtime_get_sync(dev);
+
        if (bsp_priv->integrated_phy)
                rk_gmac_integrated_phy_powerup(bsp_priv);
 
@@ -1539,6 +1542,8 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
        if (gmac->integrated_phy)
                rk_gmac_integrated_phy_powerdown(gmac);
 
+       pm_runtime_put_sync(&gmac->pdev->dev);
+
        phy_power_on(gmac, false);
        gmac_clk_enable(gmac, false);
 }
index 90383abafa66acbba638c95e076b6a7fc7598b3e..f5581db0ba9bacb040d5328866ab2bc47b2029e0 100644 (file)
@@ -218,11 +218,18 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
                                readl(ioaddr + DMA_BUS_MODE + i * 4);
 }
 
-static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
-                                    struct dma_features *dma_cap)
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+                                   struct dma_features *dma_cap)
 {
        u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
 
+       if (!hw_cap) {
+               /* 0x00000000 is the value read on old hardware that does not
+                * implement this register
+                */
+               return -EOPNOTSUPP;
+       }
+
        dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
        dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
        dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -252,6 +259,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
        dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
        /* Alternate (enhanced) DESC mode */
        dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+       return 0;
 }
 
 static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
index 5be8e6a631d9b9ccdda9a25217ac6cc72efbea6a..d99fa028c6468ef988482c80dc5a941283e530f9 100644 (file)
@@ -347,8 +347,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
        writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
 }
 
-static void dwmac4_get_hw_feature(void __iomem *ioaddr,
-                                 struct dma_features *dma_cap)
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+                                struct dma_features *dma_cap)
 {
        u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
 
@@ -437,6 +437,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
        dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
        dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
        dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+       return 0;
 }
 
 /* Enable/disable TSO feature and set MSS */
index 906e985441a93b17c11bb89bd8554ad4961fa917..5e98355f422b39e22110bded362de1939f85ce3f 100644 (file)
@@ -371,8 +371,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
        return ret;
 }
 
-static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
-                                   struct dma_features *dma_cap)
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+                                  struct dma_features *dma_cap)
 {
        u32 hw_cap;
 
@@ -445,6 +445,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
        dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
        dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
        dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+       return 0;
 }
 
 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
index 6dc1c98ebec82aba543d80ee9489e62c5d724613..fe2660d5694d7994db088195c5126afd4a61a335 100644 (file)
@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
        int (*dma_interrupt) (void __iomem *ioaddr,
                              struct stmmac_extra_stats *x, u32 chan, u32 dir);
        /* If supported then get the optional core features */
-       void (*get_hw_feature)(void __iomem *ioaddr,
-                              struct dma_features *dma_cap);
+       int (*get_hw_feature)(void __iomem *ioaddr,
+                             struct dma_features *dma_cap);
        /* Program the HW RX Watchdog */
        void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
        void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
 #define stmmac_dma_interrupt_status(__priv, __args...) \
        stmmac_do_callback(__priv, dma, dma_interrupt, __args)
 #define stmmac_get_hw_feature(__priv, __args...) \
-       stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+       stmmac_do_callback(__priv, dma, get_hw_feature, __args)
 #define stmmac_rx_watchdog(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
 #define stmmac_set_tx_ring_len(__priv, __args...) \
index 553c4403258aa2c30cb2ca06f30f2db69efc6f3f..3d67d1fa36906aa063d82f9947160a43657568de 100644 (file)
@@ -477,6 +477,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        stmmac_lpi_entry_timer_config(priv, 0);
                        del_timer_sync(&priv->eee_ctrl_timer);
                        stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+                       if (priv->hw->xpcs)
+                               xpcs_config_eee(priv->hw->xpcs,
+                                               priv->plat->mult_fact_100ns,
+                                               false);
                }
                mutex_unlock(&priv->lock);
                return false;
@@ -486,6 +490,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
                stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
                                     eee_tw_timer);
+               if (priv->hw->xpcs)
+                       xpcs_config_eee(priv->hw->xpcs,
+                                       priv->plat->mult_fact_100ns,
+                                       true);
        }
 
        if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
@@ -728,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-                       if (priv->synopsys_id != DWMAC_CORE_5_10)
+                       if (priv->synopsys_id < DWMAC_CORE_4_10)
                                ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1034,7 +1042,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
        stmmac_mac_set(priv, priv->ioaddr, false);
        priv->eee_active = false;
        priv->tx_lpi_enabled = false;
-       stmmac_eee_init(priv);
+       priv->eee_enabled = stmmac_eee_init(priv);
        stmmac_set_eee_pls(priv, priv->hw, false);
 
        if (priv->dma_cap.fpesel)
index 62cec9bfcd33722d6b124695fcd327ca6d238248..232ac98943cd08a66a4bd65b53c2d100843d29f1 100644 (file)
@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
                plat->pmt = 1;
        }
 
+       if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+               plat->has_gmac = 1;
+               plat->enh_desc = 1;
+               plat->tx_coe = 1;
+               plat->bugged_jumbo = 1;
+               plat->pmt = 1;
+       }
+
        if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
            of_device_is_compatible(np, "snps,dwmac-4.10a") ||
            of_device_is_compatible(np, "snps,dwmac-4.20a") ||
index 309de38a7530432e6d0bf56337002ff4d694683b..b0d3f9a2950c023333a88dca8b8e44614af547f0 100644 (file)
@@ -73,6 +73,7 @@ config CASSINI
 config SUNVNET_COMMON
        tristate "Common routines to support Sun Virtual Networking"
        depends on SUN_LDOMS
+       depends on INET
        default m
 
 config SUNVNET
index f4843f9672c1a594bb60073ec77fd478ea7dc7a0..441da03c23ee46cf720260880c09acf6eaf1f6cd 100644 (file)
@@ -48,6 +48,7 @@ config BPQETHER
 config DMASCC
        tristate "High-speed (DMA) SCC driver for AX.25"
        depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
+       depends on VIRT_TO_BUS
        help
          This is a driver for high-speed SCC boards, i.e. those supporting
          DMA on one port. You usually use those boards to connect your
index 775dcf4ebde580829431b8b5dad96d9a6267c1f4..6b6f28d5b8d5d3f26e59c5580efa8bf1b4978b10 100644 (file)
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
 
 /* --------------------------------------------------------------------- */
 
-#ifdef __i386__
+#if defined(__i386__) && !defined(CONFIG_UML)
 #include <asm/msr.h>
 #define GETTICK(x)                                             \
 ({                                                             \
        if (boot_cpu_has(X86_FEATURE_TSC))                      \
                x = (unsigned int)rdtsc();                      \
 })
-#else /* __i386__ */
+#else /* __i386__  && !CONFIG_UML */
 #define GETTICK(x)
-#endif /* __i386__ */
+#endif /* __i386__  && !CONFIG_UML */
 
 static void epp_bh(struct work_struct *work)
 {
index 8f99cfa14680ae22bd81644b60187f4244ece5ae..d037682fb7adb8ab20e617fbc3053bd258b85e3c 100644 (file)
@@ -4,6 +4,7 @@ config QCOM_IPA
        depends on ARCH_QCOM || COMPILE_TEST
        depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
        select QCOM_MDT_LOADER if ARCH_QCOM
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          Choose Y or M here to include support for the Qualcomm
index 0d7d3e15d2f015d0c2543dcb3835d6045cd9b742..5f4cd24a0241d33f31450d7ee6a979ad34c5fa68 100644 (file)
@@ -207,6 +207,7 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
 {
        struct ipq4019_mdio_data *priv;
        struct mii_bus *bus;
+       struct resource *res;
        int ret;
 
        bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
@@ -224,7 +225,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
                return PTR_ERR(priv->mdio_clk);
 
        /* The platform resource is provided on the chipset IPQ5018 */
-       priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);
+       /* This resource is optional */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res)
+               priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
 
        bus->name = "ipq4019_mdio";
        bus->read = ipq4019_mdio_read;
index 1ee592d3eae46dc0a755c534134c25664ac0dbe8..17f98f609ec823c0573538b7b00707e1f58d41b9 100644 (file)
@@ -134,8 +134,9 @@ static int mscc_miim_reset(struct mii_bus *bus)
 
 static int mscc_miim_probe(struct platform_device *pdev)
 {
-       struct mii_bus *bus;
        struct mscc_miim_dev *dev;
+       struct resource *res;
+       struct mii_bus *bus;
        int ret;
 
        bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
@@ -156,10 +157,14 @@ static int mscc_miim_probe(struct platform_device *pdev)
                return PTR_ERR(dev->regs);
        }
 
-       dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);
-       if (IS_ERR(dev->phy_regs)) {
-               dev_err(&pdev->dev, "Unable to map internal phy registers\n");
-               return PTR_ERR(dev->phy_regs);
+       /* This resource is optional */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res) {
+               dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(dev->phy_regs)) {
+                       dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+                       return PTR_ERR(dev->phy_regs);
+               }
        }
 
        ret = of_mdiobus_register(bus, pdev->dev.of_node);
index d127eb6e9257f6e39acb6321ea0eb55ee64f6d5d..aaa628f859fd4f3af8591e90b9f5fe0a9e569350 100644 (file)
@@ -321,7 +321,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
        /* Start MHI channels */
        err = mhi_prepare_for_transfer(mhi_dev);
        if (err)
-               goto out_err;
+               return err;
 
        /* Number of transfer descriptors determines size of the queue */
        mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
@@ -331,10 +331,6 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
                return err;
 
        return 0;
-
-out_err:
-       free_netdev(ndev);
-       return err;
 }
 
 static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
index fb0a83dc09acc02010c6c5a2597b11911ec65b69..7de631f5356fc832588fe54aef0168f32c575966 100644 (file)
@@ -666,6 +666,10 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
 {
        int ret;
 
+       ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
+       if (ret < 0)
+               return ret;
+
        if (enable) {
        /* Enable EEE */
                ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
@@ -673,9 +677,6 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
                      DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
                      mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT;
        } else {
-               ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
-               if (ret < 0)
-                       return ret;
                ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
                       DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
                       DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
@@ -690,21 +691,28 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
        if (ret < 0)
                return ret;
 
-       ret |= DW_VR_MII_EEE_TRN_LPI;
+       if (enable)
+               ret |= DW_VR_MII_EEE_TRN_LPI;
+       else
+               ret &= ~DW_VR_MII_EEE_TRN_LPI;
+
        return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
 }
 EXPORT_SYMBOL_GPL(xpcs_config_eee);
 
 static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
 {
-       int ret;
+       int ret, mdio_ctrl;
 
        /* For AN for C37 SGMII mode, the settings are :-
-        * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
-        * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
+        * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case
+             it is already enabled)
+        * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
+        * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
         *    DW xPCS used with DW EQoS MAC is always MAC side SGMII.
-        * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
+        * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
         *    speed/duplex mode change by HW after SGMII AN complete)
+        * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN)
         *
         * Note: Since it is MAC side SGMII, there is no need to set
         *       SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from
@@ -712,6 +720,17 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
         *       between PHY and Link Partner. There is also no need to
         *       trigger AN restart for MAC-side SGMII.
         */
+       mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+       if (mdio_ctrl < 0)
+               return mdio_ctrl;
+
+       if (mdio_ctrl & AN_CL37_EN) {
+               ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                                mdio_ctrl & ~AN_CL37_EN);
+               if (ret < 0)
+                       return ret;
+       }
+
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
        if (ret < 0)
                return ret;
@@ -736,7 +755,15 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
        else
                ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
 
-       return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+       ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+       if (ret < 0)
+               return ret;
+
+       if (phylink_autoneg_inband(mode))
+               ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                                mdio_ctrl | AN_CL37_EN);
+
+       return ret;
 }
 
 static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
index e79297a4bae81dffb7095c0cdeb465f04e675e77..27b6a3f507ae60d9e7e836f35fd6c373be010182 100644 (file)
 #define MII_BCM7XXX_SHD_2_ADDR_CTRL    0xe
 #define MII_BCM7XXX_SHD_2_CTRL_STAT    0xf
 #define MII_BCM7XXX_SHD_2_BIAS_TRIM    0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL     0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS   0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP      0x2
 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV   0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP       0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR   0x5
 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2   0x6
 #define  MII_BCM7XXX_PCS_CTRL_2_DEF    0x4400
 #define MII_BCM7XXX_SHD_3_AN_STAT      0xb
@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
-static int phy_set_clr_bits(struct phy_device *dev, int location,
-                                       int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+                             int set_mask, int clr_mask)
 {
        int v, ret;
 
-       v = phy_read(dev, location);
+       v = __phy_read(dev, location);
        if (v < 0)
                return v;
 
        v &= ~clr_mask;
        v |= set_mask;
 
-       ret = phy_write(dev, location, v);
+       ret = __phy_write(dev, location, v);
        if (ret < 0)
                return ret;
 
        return v;
 }
 
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+                           int set_mask, int clr_mask)
+{
+       int ret;
+
+       mutex_lock(&dev->mdio.bus->mdio_lock);
+       ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+       mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+       return ret;
+}
+
 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
        return bcm7xxx_28nm_ephy_apd_enable(phydev);
 }
 
+#define MII_BCM7XXX_REG_INVALID        0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+       switch (regnum) {
+       case MDIO_CTRL1:
+               return MII_BCM7XXX_SHD_3_PCS_CTRL;
+       case MDIO_STAT1:
+               return MII_BCM7XXX_SHD_3_PCS_STATUS;
+       case MDIO_PCS_EEE_ABLE:
+               return MII_BCM7XXX_SHD_3_EEE_CAP;
+       case MDIO_AN_EEE_ADV:
+               return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+       case MDIO_AN_EEE_LPABLE:
+               return MII_BCM7XXX_SHD_3_EEE_LP;
+       case MDIO_PCS_EEE_WK_ERR:
+               return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+       default:
+               return MII_BCM7XXX_REG_INVALID;
+       }
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+       return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+                                     int devnum, u16 regnum)
+{
+       u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+       int ret;
+
+       if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+           shd == MII_BCM7XXX_REG_INVALID)
+               return -EOPNOTSUPP;
+
+       /* set shadow mode 2 */
+       ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                                MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Access the desired shadow register address */
+       ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                          MII_BCM7XXX_SHD_MODE_2);
+       return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+                                      int devnum, u16 regnum, u16 val)
+{
+       u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+       int ret;
+
+       if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+           shd == MII_BCM7XXX_REG_INVALID)
+               return -EOPNOTSUPP;
+
+       /* set shadow mode 2 */
+       ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                                MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Access the desired shadow register address */
+       ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Write the desired value in the shadow register */
+       __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                                 MII_BCM7XXX_SHD_MODE_2);
+}
+
 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
 {
        int ret;
@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
        .get_stats      = bcm7xxx_28nm_get_phy_stats,                   \
        .probe          = bcm7xxx_28nm_probe,                           \
        .remove         = bcm7xxx_28nm_remove,                          \
+       .read_mmd       = bcm7xxx_28nm_ephy_read_mmd,                   \
+       .write_mmd      = bcm7xxx_28nm_ephy_write_mmd,                  \
 }
 
 #define BCM7XXX_40NM_EPHY(_oui, _name)                                 \
index 53f034fc2ef7983527faa32fc7589e93eb8f04bc..6865d9319197f716c18aec179cb4db74ea78e8da 100644 (file)
@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
            NULL == bus->read || NULL == bus->write)
                return -EINVAL;
 
+       if (bus->parent && bus->parent->of_node)
+               bus->parent->of_node->fwnode.flags |=
+                                       FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
        BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
               bus->state != MDIOBUS_UNREGISTERED);
 
@@ -534,6 +538,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
+       /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+        * the device in mdiobus_free()
+        *
+        * State will be updated later in this function in case of success
+        */
+       bus->state = MDIOBUS_UNREGISTERED;
+
        err = device_register(&bus->dev);
        if (err) {
                pr_err("mii_bus %s failed to register\n", bus->id);
index 2d5d5081c3b6b01bbeb86f19ad74e0ba1ae2f9d5..5ce1bf03bbd71a11a094c387820bc0bd813f45ae 100644 (file)
@@ -493,6 +493,25 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
        return ret;
 }
 
+static int gpy115_loopback(struct phy_device *phydev, bool enable)
+{
+       int ret;
+       int fw_minor;
+
+       if (enable)
+               return gpy_loopback(phydev, enable);
+
+       ret = phy_read(phydev, PHY_FWV);
+       if (ret < 0)
+               return ret;
+
+       fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
+       if (fw_minor > 0x0076)
+               return gpy_loopback(phydev, 0);
+
+       return genphy_soft_reset(phydev);
+}
+
 static struct phy_driver gpy_drivers[] = {
        {
                PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
@@ -527,7 +546,7 @@ static struct phy_driver gpy_drivers[] = {
                .handle_interrupt = gpy_handle_interrupt,
                .set_wol        = gpy_set_wol,
                .get_wol        = gpy_get_wol,
-               .set_loopback   = gpy_loopback,
+               .set_loopback   = gpy115_loopback,
        },
        {
                PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
@@ -544,7 +563,7 @@ static struct phy_driver gpy_drivers[] = {
                .handle_interrupt = gpy_handle_interrupt,
                .set_wol        = gpy_set_wol,
                .get_wol        = gpy_get_wol,
-               .set_loopback   = gpy_loopback,
+               .set_loopback   = gpy115_loopback,
        },
        {
                .phy_id         = PHY_ID_GPY211B,
index ba5ad86ec8261256af4523e6ddb1f00b98bc5cf1..4f9990b47a377de462454378ce3888f6274d1a79 100644 (file)
@@ -3125,6 +3125,9 @@ static void phy_shutdown(struct device *dev)
 {
        struct phy_device *phydev = to_phy_device(dev);
 
+       if (phydev->state == PHY_READY || !phydev->attached_dev)
+               return;
+
        phy_disable_interrupts(phydev);
 }
 
index 34e90216bd2cb7fedcbf0f22637cffc853ffe46b..ab77a9f439ef9a51e6095fc371a39fd3437a2b7c 100644 (file)
@@ -134,7 +134,7 @@ static const char * const sm_state_strings[] = {
        [SFP_S_LINK_UP] = "link_up",
        [SFP_S_TX_FAULT] = "tx_fault",
        [SFP_S_REINIT] = "reinit",
-       [SFP_S_TX_DISABLE] = "rx_disable",
+       [SFP_S_TX_DISABLE] = "tx_disable",
 };
 
 static const char *sm_state_to_str(unsigned short sm_state)
index 4c5d69732a7e125528fdc6f519d873f9c8181503..b554054a7560aa6401d6072993fbbe16876de6ab 100644 (file)
@@ -99,6 +99,10 @@ config USB_RTL8150
 config USB_RTL8152
        tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
        select MII
+       select CRC32
+       select CRYPTO
+       select CRYPTO_HASH
+       select CRYPTO_SHA256
        help
          This option adds support for Realtek RTL8152 based USB 2.0
          10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
@@ -113,6 +117,7 @@ config USB_LAN78XX
        select PHYLIB
        select MICROCHIP_PHY
        select FIXED_PHY
+       select CRC32
        help
          This option adds support for Microchip LAN78XX based USB 2
          & USB 3 10/100/1000 Ethernet adapters.
index 60ba9b734055ba3da6f36ac117021805a22a9edb..f329e39100a7dd0782310c98e8d63582791ed95a 100644 (file)
@@ -767,6 +767,7 @@ enum rtl8152_flags {
        PHY_RESET,
        SCHEDULE_TASKLET,
        GREEN_ETHERNET,
+       RX_EPROTO,
 };
 
 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2      0x3082
@@ -1770,6 +1771,14 @@ static void read_bulk_callback(struct urb *urb)
                rtl_set_unplug(tp);
                netif_device_detach(tp->netdev);
                return;
+       case -EPROTO:
+               urb->actual_length = 0;
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               list_add_tail(&agg->list, &tp->rx_done);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               set_bit(RX_EPROTO, &tp->flags);
+               schedule_delayed_work(&tp->schedule, 1);
+               return;
        case -ENOENT:
                return; /* the urb is in unlink state */
        case -ETIME:
@@ -2425,6 +2434,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
        if (list_empty(&tp->rx_done))
                goto out1;
 
+       clear_bit(RX_EPROTO, &tp->flags);
        INIT_LIST_HEAD(&rx_queue);
        spin_lock_irqsave(&tp->rx_lock, flags);
        list_splice_init(&tp->rx_done, &rx_queue);
@@ -2441,7 +2451,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 
                agg = list_entry(cursor, struct rx_agg, list);
                urb = agg->urb;
-               if (urb->actual_length < ETH_ZLEN)
+               if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
                        goto submit;
 
                agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
@@ -6643,6 +6653,10 @@ static void rtl_work_func_t(struct work_struct *work)
            netif_carrier_ok(tp->netdev))
                tasklet_schedule(&tp->tx_tl);
 
+       if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
+           !list_empty(&tp->rx_done))
+               napi_schedule(&tp->napi);
+
        mutex_unlock(&tp->control);
 
 out1:
index 7d953974eb9b5d61f326fae1a81448945bbbe2fb..26b1bd8e845b437b7fdff7e22d8c4803fc020268 100644 (file)
@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 
 static void smsc95xx_handle_link_change(struct net_device *net)
 {
+       struct usbnet *dev = netdev_priv(net);
+
        phy_print_status(net->phydev);
+       usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
 }
 
 static int smsc95xx_start_phy(struct usbnet *dev)
index 840c1c2ab16afa192b15b17760de1f0dd384aed5..80432ee0ce694773477acfda06cf40a3639ba58f 100644 (file)
@@ -1788,6 +1788,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        if (!dev->rx_urb_size)
                dev->rx_urb_size = dev->hard_mtu;
        dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+       if (dev->maxpacket == 0) {
+               /* that is a broken device */
+               goto out4;
+       }
 
        /* let userspace know we have a random address */
        if (ether_addr_equal(net->dev_addr, node_id))
index 79bd2585ec6b2d16d76280d189d775313ccfd530..4ad25a8b0870c67f40c2cc8a6a0f6eddaded09dd 100644 (file)
@@ -406,7 +406,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
         * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
        truesize = headroom ? PAGE_SIZE : truesize;
-       tailroom = truesize - len - headroom;
+       tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
        buf = p - headroom;
 
        len -= hdr_len;
index bf2fac913942838124aa5f8575a8593534f98467..662e261173539fca7392ed1ff93bfa993ac82c5f 100644 (file)
@@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
 
-       nf_reset_ct(skb);
-
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
         * For strict packets with a source LLA, determine the dst using the
@@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
 
-       nf_reset_ct(skb);
-
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
                goto out;
 
index 741289e385d59411715b60a43aac1ddaa9b50199..ca007b800f75607eaba3a0b842348e2617f5cf40 100644 (file)
@@ -44,7 +44,7 @@ config ATH10K_SNOC
        tristate "Qualcomm ath10k SNOC support"
        depends on ATH10K
        depends on ARCH_QCOM || COMPILE_TEST
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          This module adds support for integrated WCN3990 chip connected
index f35cd8de228e49f4db2c3fef795b131f5c30ad1a..6914b37bb0fbcf4e35334ec5e13c4c8d3b738f88 100644 (file)
@@ -3,9 +3,7 @@ config ATH5K
        tristate "Atheros 5xxx wireless cards support"
        depends on (PCI || ATH25) && MAC80211
        select ATH_COMMON
-       select MAC80211_LEDS
-       select LEDS_CLASS
-       select NEW_LEDS
+       select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select ATH5K_AHB if ATH25
        select ATH5K_PCI if !ATH25
        help
index 6a2a168567630fdbf8ea47818c3a336876b04df4..33e9928af36354dda8848a50f93ea53523a9e694 100644 (file)
@@ -89,7 +89,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
 
 void ath5k_led_enable(struct ath5k_hw *ah)
 {
-       if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
+       if (IS_ENABLED(CONFIG_MAC80211_LEDS) &&
+           test_bit(ATH_STAT_LEDSOFT, ah->status)) {
                ath5k_hw_set_gpio_output(ah, ah->led_pin);
                ath5k_led_off(ah);
        }
@@ -104,7 +105,8 @@ static void ath5k_led_on(struct ath5k_hw *ah)
 
 void ath5k_led_off(struct ath5k_hw *ah)
 {
-       if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) ||
+           !test_bit(ATH_STAT_LEDSOFT, ah->status))
                return;
        ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
 }
@@ -146,7 +148,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
 static void
 ath5k_unregister_led(struct ath5k_led *led)
 {
-       if (!led->ah)
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah)
                return;
        led_classdev_unregister(&led->led_dev);
        ath5k_led_off(led->ah);
@@ -169,7 +171,7 @@ int ath5k_init_leds(struct ath5k_hw *ah)
        char name[ATH5K_LED_MAX_NAME_LEN + 1];
        const struct pci_device_id *match;
 
-       if (!ah->pdev)
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev)
                return 0;
 
 #ifdef CONFIG_ATH5K_AHB
index f7b96cd69242d0fa38cb265a94490e48ca306236..9db12ffd2ff8010f4ea7a90a8c20a2f3d431850d 100644 (file)
@@ -7463,23 +7463,18 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
        s32 found_index;
        int i;
 
+       country_codes = drvr->settings->country_codes;
+       if (!country_codes) {
+               brcmf_dbg(TRACE, "No country codes configured for device\n");
+               return -EINVAL;
+       }
+
        if ((alpha2[0] == ccreq->country_abbrev[0]) &&
            (alpha2[1] == ccreq->country_abbrev[1])) {
                brcmf_dbg(TRACE, "Country code already set\n");
                return -EAGAIN;
        }
 
-       country_codes = drvr->settings->country_codes;
-       if (!country_codes) {
-               brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
-               memset(ccreq, 0, sizeof(*ccreq));
-               ccreq->country_abbrev[0] = alpha2[0];
-               ccreq->country_abbrev[1] = alpha2[1];
-               ccreq->ccode[0] = alpha2[0];
-               ccreq->ccode[1] = alpha2[1];
-               return 0;
-       }
-
        found_index = -1;
        for (i = 0; i < country_codes->table_size; i++) {
                cc = &country_codes->table[i];
index 0e97d5e6c6448f64aba4356a81b857657871c9a8..9f706fffb5922003f578f960569539ce26425398 100644 (file)
@@ -160,6 +160,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                mvm->ptk_icvlen = key->icv_len;
                mvm->gtk_ivlen = key->iv_len;
                mvm->gtk_icvlen = key->icv_len;
+               mutex_unlock(&mvm->mutex);
 
                /* don't upload key again */
                return;
@@ -360,11 +361,11 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
        if (sta) {
                rsc = data->rsc->ucast_rsc;
        } else {
-               if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids)))
+               if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids)))
                        return;
                data->gtk_ids[data->gtks] = key->keyidx;
                rsc = data->rsc->mcast_rsc[data->gtks % 2];
-               if (WARN_ON(key->keyidx >
+               if (WARN_ON(key->keyidx >=
                                ARRAY_SIZE(data->rsc->mcast_key_id_map)))
                        return;
                data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
index 25af88a3edcea428b2dc9bae94bdc7939adac7a7..e91f8e889df70b07dba339017ae9dc28f12cd8c1 100644 (file)
@@ -662,12 +662,13 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                                        u32 *uid)
 {
        u32 id;
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+       struct iwl_mvm_vif *mvmvif;
        enum nl80211_iftype iftype;
 
        if (!te_data->vif)
                return false;
 
+       mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
        iftype = te_data->vif->type;
 
        /*
index 61b2797a34a886ff1c1d3efd526a08b79a60e7d5..e3996ff99bad54c77616e93ba360728eddb367e4 100644 (file)
@@ -547,6 +547,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
+       IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
        IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
index ffa894f7312a47a0bfd4277fb3c5a02130fe86dd..0adae76eb8df1d37958122210cc63975f7a4adbb 100644 (file)
@@ -1867,8 +1867,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
                bcn_int -= data->bcn_delta;
                data->bcn_delta = 0;
        }
-       hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
-                       ns_to_ktime(bcn_int * NSEC_PER_USEC));
+       hrtimer_forward_now(&data->beacon_timer,
+                           ns_to_ktime(bcn_int * NSEC_PER_USEC));
        return HRTIMER_RESTART;
 }
 
index 241305377e2069ea460fe266cb264887d504f878..a9b5eb992220da5b896a2c5d087b4c3dbe24422c 100644 (file)
@@ -62,8 +62,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
 
        pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
 
-       pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
-                        NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
+       pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) &
+              (MWIFIEX_DMA_ALIGN_SZ - 1);
        skb_push(skb, sizeof(*local_tx_pd) + pad);
 
        local_tx_pd = (struct txpd *) skb->data;
index 9bbdb8dfce62ae1de23f142208c71bb5a16cb21f..245ff644f81e33418828552bc38067754e7410c4 100644 (file)
@@ -475,8 +475,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
 
        pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
 
-       pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
-                       (MWIFIEX_DMA_ALIGN_SZ - 1);
+       pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) &
+              (MWIFIEX_DMA_ALIGN_SZ - 1);
 
        skb_push(skb, sizeof(*txpd) + pad);
 
index d16cf3ff644e4811b9435c635f8f7bcdc96333e8..b23f47936473d9eb93f8cd6d0f023eb9f6a54eff 100644 (file)
@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
                                 &reset_cmd,
                                 ST95HF_RESET_CMD_LEN,
                                 ASYNC);
-       if (result) {
+       if (result)
                dev_err(&spictx->spidev->dev,
                        "ST95HF reset failed in remove() err = %d\n", result);
-               return result;
-       }
 
        /* wait for 3 ms to complete the controller reset process */
        usleep_range(3000, 4000);
@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
        if (stcontext->st95hf_supply)
                regulator_disable(stcontext->st95hf_supply);
 
-       return result;
+       return 0;
 }
 
 /* Register as SPI protocol driver */
index 72de88ff0d30d2bf359aa55bd032264119ee159a..ef4950f8083263c65f10888d92f9b314d2decdbf 100644 (file)
@@ -380,7 +380,6 @@ static int pmem_attach_disk(struct device *dev,
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
-       struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;
@@ -489,10 +488,8 @@ static int pmem_attach_disk(struct device *dev,
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
-       gendev = disk_to_dev(disk);
-       gendev->groups = pmem_attribute_groups;
 
-       device_add_disk(dev, disk, NULL);
+       device_add_disk(dev, disk, pmem_attribute_groups);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;
 
index e486845d2c7eb7ba5c2c9bcc393957b7adeebf2b..f8dd664b2eda52f377a1b30e707a5e36b0e6a66d 100644 (file)
@@ -978,6 +978,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
        struct nvme_command *cmd = nvme_req(req)->cmd;
+       struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
        blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1026,7 +1027,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
                return BLK_STS_IOERR;
        }
 
-       nvme_req(req)->genctr++;
+       if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+               nvme_req(req)->genctr++;
        cmd->common.command_id = nvme_cid(req);
        trace_nvme_setup_cmd(req, cmd);
        return ret;
@@ -3548,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
        return 0;
 }
 
+static void nvme_cdev_rel(struct device *dev)
+{
+       ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
 {
        cdev_device_del(cdev, cdev_device);
-       ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+       put_device(cdev_device);
 }
 
 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -3564,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
                return minor;
        cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
        cdev_device->class = nvme_ns_chr_class;
+       cdev_device->release = nvme_cdev_rel;
        device_initialize(cdev_device);
        cdev_init(cdev, fops);
        cdev->owner = owner;
        ret = cdev_device_add(cdev, cdev_device);
-       if (ret) {
+       if (ret)
                put_device(cdev_device);
-               ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
-       }
+
        return ret;
 }
 
@@ -3603,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
                           ns->ctrl->instance, ns->head->instance);
        if (ret)
                return ret;
-       ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
-                           ns->ctrl->ops->module);
-       if (ret)
-               kfree_const(ns->cdev_device.kobj.name);
-       return ret;
+
+       return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+                            ns->ctrl->ops->module);
 }
 
 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
index e8ccdd398f784443241d659761414f0e622c111b..fba06618c6c23f0c948deeb5e1797fc1d1521013 100644 (file)
@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
                return ret;
        ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
                            &nvme_ns_head_chr_fops, THIS_MODULE);
-       if (ret)
-               kfree_const(head->cdev_device.kobj.name);
        return ret;
 }
 
index 9871c0c9374c4c40d7672bab0f663f290b5f34ce..ed79a6c7e8043b50d9770d278d0b8316436e4bbf 100644 (file)
@@ -138,6 +138,12 @@ enum nvme_quirks {
         * 48 bits.
         */
        NVME_QUIRK_DMA_ADDRESS_BITS_48          = (1 << 16),
+
+       /*
+        * The controller requires the command_id value be limited, so skip
+        * encoding the generation sequence number.
+        */
+       NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
 };
 
 /*
index b82492cd750330803f48e58f6c3e1a3d7d7daaff..149ecf73df384b2091a7f22df1308713c2bbd90a 100644 (file)
@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        iod->aborted = 1;
 
        cmd.abort.opcode = nvme_admin_abort_cmd;
-       cmd.abort.cid = req->tag;
+       cmd.abort.cid = nvme_cid(req);
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
        dev_warn(nvmeq->dev->ctrl.device,
@@ -3369,7 +3369,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                                NVME_QUIRK_128_BYTES_SQES |
-                               NVME_QUIRK_SHARED_TAGS },
+                               NVME_QUIRK_SHARED_TAGS |
+                               NVME_QUIRK_SKIP_CID_GEN },
 
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
index 3d87fadaa160d5242bb0c99a3c08693ad026b9f1..8976da38b375a1b481cc8b7b41c84d2297e1423d 100644 (file)
@@ -1383,7 +1383,8 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
                *p-- = 0;
 
        /* clear msb bits if any leftover in the last byte */
-       *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+       if (cell->nbits % BITS_PER_BYTE)
+               *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
 }
 
 static int __nvmem_cell_read(struct nvmem_device *nvmem,
index f720c0d246f2706f16ed9a1a84125314096653ed..0ac17256258d59b94adee28100bd2c2ef7af2d6b 100644 (file)
@@ -36,6 +36,7 @@ LIST_HEAD(aliases_lookup);
 struct device_node *of_root;
 EXPORT_SYMBOL(of_root);
 struct device_node *of_chosen;
+EXPORT_SYMBOL(of_chosen);
 struct device_node *of_aliases;
 struct device_node *of_stdout;
 static const char *of_stdout_options;
index 59c1390cdf423639b20c80b5e476c12e9cd20152..9da8835ba5a58148f8e6277700d5b152ff9531c9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <linux/kmemleak.h>
 
 #include "of_private.h"
 
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
                err = memblock_mark_nomap(base, size);
                if (err)
                        memblock_free(base, size);
+               kmemleak_ignore_phys(base);
        }
 
        return err;
index eaec915ffe62fac109579735c39b9c683e0826c2..67c46e52c0dc3812f4d7e65b14d661b62d824eef 100644 (file)
@@ -3301,9 +3301,17 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
                return 0;
 
        if (!keep_devs) {
-               /* Delete any children which might still exist. */
+               struct list_head removed;
+
+               /* Move all present children to the list on stack */
+               INIT_LIST_HEAD(&removed);
                spin_lock_irqsave(&hbus->device_list_lock, flags);
-               list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
+               list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+                       list_move_tail(&hpdev->list_entry, &removed);
+               spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+               /* Remove all children in the list */
+               list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
                        list_del(&hpdev->list_entry);
                        if (hpdev->pci_slot)
                                pci_destroy_slot(hpdev->pci_slot);
@@ -3311,7 +3319,6 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
                        put_pcichild(hpdev);
                        put_pcichild(hpdev);
                }
-               spin_unlock_irqrestore(&hbus->device_list_lock, flags);
        }
 
        ret = hv_send_resources_released(hdev);
index 014868752cd4dce46c9554862b50e78846bbf4ed..dcefdb42ac463d45dba6a32c80b0e075a4ab4752 100644 (file)
@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
        struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
                                             hotplug_slot);
 
-       switch (zdev->state) {
-       case ZPCI_FN_STATE_STANDBY:
-               *value = 0;
-               break;
-       default:
-               *value = 1;
-               break;
-       }
+       *value = zpci_is_device_configured(zdev) ? 1 : 0;
        return 0;
 }
 
index 0099a00af361ba657e04bb00cf8d9091f4154e3a..4b4792940e8691062affc2dd4868745c55b41fb8 100644 (file)
@@ -535,6 +535,7 @@ static int msi_verify_entries(struct pci_dev *dev)
 static int msi_capability_init(struct pci_dev *dev, int nvec,
                               struct irq_affinity *affd)
 {
+       const struct attribute_group **groups;
        struct msi_desc *entry;
        int ret;
 
@@ -558,12 +559,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
        if (ret)
                goto err;
 
-       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
-       if (IS_ERR(dev->msi_irq_groups)) {
-               ret = PTR_ERR(dev->msi_irq_groups);
+       groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(groups)) {
+               ret = PTR_ERR(groups);
                goto err;
        }
 
+       dev->msi_irq_groups = groups;
+
        /* Set MSI enabled bits */
        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 1);
@@ -691,6 +694,7 @@ static void msix_mask_all(void __iomem *base, int tsize)
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
 {
+       const struct attribute_group **groups;
        void __iomem *base;
        int ret, tsize;
        u16 control;
@@ -730,12 +734,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 
        msix_update_entries(dev, entries);
 
-       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
-       if (IS_ERR(dev->msi_irq_groups)) {
-               ret = PTR_ERR(dev->msi_irq_groups);
+       groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(groups)) {
+               ret = PTR_ERR(groups);
                goto out_free;
        }
 
+       dev->msi_irq_groups = groups;
+
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
index 0f40943a9a18a6302c4fc8079ac391f5223dbe1a..260a06fb78a6132b27ba839b082625e4c2e44e9f 100644 (file)
@@ -1249,6 +1249,9 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
        bool check_children;
        u64 addr;
 
+       if (!dev->parent)
+               return NULL;
+
        down_read(&pci_acpi_companion_lookup_sem);
 
        adev = pci_acpi_find_companion_hook ?
index 3cbc3baf087f3938d8bef85f79fb930d1a7f47f9..295cc7952d0edf9d622727357d1c3d607ac4aa0b 100644 (file)
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
                pmu->name, pmu->num_events,
                has_nmi ? ", using NMIs" : "");
 
+       kvm_host_pmu_init(pmu);
+
        return 0;
 
 out_destroy:
index e79690bd8b85f25a9e8a9665dd4a1cc08bbb8ca8..d7f8175d2c1c86b2d4799b3faafd0a27da402ca4 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -13,7 +12,6 @@
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/platform_device.h>
-#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #define FLAG_BCM4708           BIT(1)
@@ -24,8 +22,7 @@ struct ns_pinctrl {
        struct device *dev;
        unsigned int chipset_flag;
        struct pinctrl_dev *pctldev;
-       struct regmap *regmap;
-       u32 offset;
+       void __iomem *base;
 
        struct pinctrl_desc pctldesc;
        struct ns_pinctrl_group *groups;
@@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
                unset |= BIT(pin_number);
        }
 
-       regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
+       tmp = readl(ns_pinctrl->base);
        tmp &= ~unset;
-       regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
+       writel(tmp, ns_pinctrl->base);
 
        return 0;
 }
@@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
 static int ns_pinctrl_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
        const struct of_device_id *of_id;
        struct ns_pinctrl *ns_pinctrl;
        struct pinctrl_desc *pctldesc;
        struct pinctrl_pin_desc *pin;
        struct ns_pinctrl_group *group;
        struct ns_pinctrl_function *function;
+       struct resource *res;
        int i;
 
        ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
@@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
                return -EINVAL;
        ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
 
-       ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
-       if (IS_ERR(ns_pinctrl->regmap)) {
-               int err = PTR_ERR(ns_pinctrl->regmap);
-
-               dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
-
-               return err;
-       }
-
-       if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
-               dev_err(dev, "Failed to get register offset\n");
-               return -ENOENT;
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                          "cru_gpio_control");
+       ns_pinctrl->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ns_pinctrl->base)) {
+               dev_err(dev, "Failed to map pinctrl regs\n");
+               return PTR_ERR(ns_pinctrl->base);
        }
 
        memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
index a4ac87c8b4f8da380d1b04380e7a175351904c1b..5082102d7d0d973f8403b7c23d10a8fa3ad5f7a6 100644 (file)
@@ -2306,7 +2306,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
 
 /**
  * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
- * @dev: device for which which resource was allocated
+ * @dev: device for which resource was allocated
  * @pctldev: the pinctrl device to unregister.
  */
 void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
index c001f2ed20f834fa84a05c47916fad967ec71843..bae9d429b813e3c57e98447622ddcf9a4995aaa9 100644 (file)
@@ -445,6 +445,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
        u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+       int err;
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -457,6 +458,15 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
+       if (on)
+               err = enable_irq_wake(gpio_dev->irq);
+       else
+               err = disable_irq_wake(gpio_dev->irq);
+
+       if (err)
+               dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
+                       on ? "enable" : "disable");
+
        return 0;
 }
 
@@ -830,6 +840,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
        .pin_config_group_set = amd_pinconf_group_set,
 };
 
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+       struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+       unsigned long flags;
+       u32 pin_reg, mask;
+       int i;
+
+       mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+               BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+               BIT(WAKE_CNTRL_OFF_S4);
+
+       for (i = 0; i < desc->npins; i++) {
+               int pin = desc->pins[i].number;
+               const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+               if (!pd)
+                       continue;
+
+               raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+               pin_reg = readl(gpio_dev->base + i * 4);
+               pin_reg &= ~mask;
+               writel(pin_reg, gpio_dev->base + i * 4);
+
+               raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+       }
+}
+
 #ifdef CONFIG_PM_SLEEP
 static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
 {
@@ -902,7 +940,6 @@ static struct pinctrl_desc amd_pinctrl_desc = {
 static int amd_gpio_probe(struct platform_device *pdev)
 {
        int ret = 0;
-       int irq_base;
        struct resource *res;
        struct amd_gpio *gpio_dev;
        struct gpio_irq_chip *girq;
@@ -925,9 +962,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
        if (!gpio_dev->base)
                return -ENOMEM;
 
-       irq_base = platform_get_irq(pdev, 0);
-       if (irq_base < 0)
-               return irq_base;
+       gpio_dev->irq = platform_get_irq(pdev, 0);
+       if (gpio_dev->irq < 0)
+               return gpio_dev->irq;
 
 #ifdef CONFIG_PM_SLEEP
        gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
@@ -967,6 +1004,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
                return PTR_ERR(gpio_dev->pctrl);
        }
 
+       /* Disable and mask interrupts */
+       amd_gpio_irq_init(gpio_dev);
+
        girq = &gpio_dev->gc.irq;
        girq->chip = &amd_gpio_irqchip;
        /* This will let us handle the parent IRQ in the driver */
@@ -987,7 +1027,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+       ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
        if (ret)
                goto out2;
index 95e76342404222545efb3a721e60ba7c9ab8666a..1d431707365456af3df7cd6c6415a1a6ff44c89b 100644 (file)
@@ -98,6 +98,7 @@ struct amd_gpio {
        struct resource         *res;
        struct platform_device  *pdev;
        u32                     *saved_regs;
+       int                     irq;
 };
 
 /*  KERNCZ configuration*/
index ae33e376695fd75998aab478b8a1148cc95c51ba..5ce260f152ce568b32d0a80e66fd9aff59132f0d 100644 (file)
@@ -2092,6 +2092,23 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
        return false;
 }
 
+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
+                                        unsigned int pin, u32 arg)
+{
+       struct rockchip_pin_output_deferred *cfg;
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+
+       cfg->pin = pin;
+       cfg->arg = arg;
+
+       list_add_tail(&cfg->head, &bank->deferred_output);
+
+       return 0;
+}
+
 /* set the pin config settings for a specified pin */
 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                unsigned long *configs, unsigned num_configs)
@@ -2136,6 +2153,22 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        if (rc != RK_FUNC_GPIO)
                                return -EINVAL;
 
+                       /*
+                        * Check for gpio driver not being probed yet.
+                        * The lock makes sure that either gpio-probe has completed
+                        * or the gpio driver hasn't probed yet.
+                        */
+                       mutex_lock(&bank->deferred_lock);
+                       if (!gpio || !gpio->direction_output) {
+                               rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
+                               mutex_unlock(&bank->deferred_lock);
+                               if (rc)
+                                       return rc;
+
+                               break;
+                       }
+                       mutex_unlock(&bank->deferred_lock);
+
                        rc = gpio->direction_output(gpio, pin - bank->pin_base,
                                                    arg);
                        if (rc)
@@ -2204,6 +2237,11 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
                if (rc != RK_FUNC_GPIO)
                        return -EINVAL;
 
+               if (!gpio || !gpio->get) {
+                       arg = 0;
+                       break;
+               }
+
                rc = gpio->get(gpio, pin - bank->pin_base);
                if (rc < 0)
                        return rc;
@@ -2450,6 +2488,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
                                                pin_bank->name, pin);
                        pdesc++;
                }
+
+               INIT_LIST_HEAD(&pin_bank->deferred_output);
+               mutex_init(&pin_bank->deferred_lock);
        }
 
        ret = rockchip_pinctrl_parse_dt(pdev, info);
@@ -2716,6 +2757,31 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int rockchip_pinctrl_remove(struct platform_device *pdev)
+{
+       struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
+       struct rockchip_pin_bank *bank;
+       struct rockchip_pin_output_deferred *cfg;
+       int i;
+
+       of_platform_depopulate(&pdev->dev);
+
+       for (i = 0; i < info->ctrl->nr_banks; i++) {
+               bank = &info->ctrl->pin_banks[i];
+
+               mutex_lock(&bank->deferred_lock);
+               while (!list_empty(&bank->deferred_output)) {
+                       cfg = list_first_entry(&bank->deferred_output,
+                                              struct rockchip_pin_output_deferred, head);
+                       list_del(&cfg->head);
+                       kfree(cfg);
+               }
+               mutex_unlock(&bank->deferred_lock);
+       }
+
+       return 0;
+}
+
 static struct rockchip_pin_bank px30_pin_banks[] = {
        PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
                                             IOMUX_SOURCE_PMU,
@@ -3175,6 +3241,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
 
 static struct platform_driver rockchip_pinctrl_driver = {
        .probe          = rockchip_pinctrl_probe,
+       .remove         = rockchip_pinctrl_remove,
        .driver = {
                .name   = "rockchip-pinctrl",
                .pm = &rockchip_pinctrl_dev_pm_ops,
index 589d4d2a98c9ef788f1216e337c822bfdb4bb661..91f10279d08442d1a4c441131649bea211771f74 100644 (file)
@@ -141,6 +141,8 @@ struct rockchip_drv {
  * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
  * @recalced_mask: bit mask to indicate a need to recalulate the mask
  * @route_mask: bits describing the routing pins of per bank
+ * @deferred_output: gpio output settings to be done after gpio bank probed
+ * @deferred_lock: mutex for the deferred_output shared btw gpio and pinctrl
  */
 struct rockchip_pin_bank {
        struct device                   *dev;
@@ -169,6 +171,8 @@ struct rockchip_pin_bank {
        u32                             toggle_edge_mode;
        u32                             recalced_mask;
        u32                             route_mask;
+       struct list_head                deferred_output;
+       struct mutex                    deferred_lock;
 };
 
 /**
@@ -243,6 +247,12 @@ struct rockchip_pin_config {
        unsigned int            nconfigs;
 };
 
+struct rockchip_pin_output_deferred {
+       struct list_head head;
+       unsigned int pin;
+       u32 arg;
+};
+
 /**
  * struct rockchip_pin_group: represent group of pins of a pinmux function.
  * @name: name of the pin group, used to lookup the group.
index 32ea2a8ec02b54a1b9323cd5cc6c1663403f774a..5ff4207df66e1d5f98ed0f1b59a1c867a2a7928e 100644 (file)
@@ -3,7 +3,8 @@ if (ARCH_QCOM || COMPILE_TEST)
 
 config PINCTRL_MSM
        tristate "Qualcomm core pin controller driver"
-       depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
+       depends on GPIOLIB
+       select QCOM_SCM
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index afddf6d60dbe650c60d92ad8ed98230da5b57379..9017ede409c9c7a3bd5410e2c9ccedaaf94dc525 100644 (file)
@@ -1496,6 +1496,7 @@ static const struct of_device_id sc7280_pinctrl_of_match[] = {
 static struct platform_driver sc7280_pinctrl_driver = {
        .driver = {
                .name = "sc7280-pinctrl",
+               .pm = &msm_pinctrl_dev_pm_ops,
                .of_match_table = sc7280_pinctrl_of_match,
        },
        .probe = sc7280_pinctrl_probe,
index 98bf0e2a2a8da5141271133ea9ac434daf0db524..b2562e8931397e594b9fe67f04b512202a68e381 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/gpio/driver.h>
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/spmi.h>
 #include <linux/types.h>
 
 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
@@ -171,6 +172,8 @@ struct pmic_gpio_state {
        struct pinctrl_dev *ctrl;
        struct gpio_chip chip;
        struct irq_chip irq;
+       u8 usid;
+       u8 pid_base;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
                                           unsigned int *parent_hwirq,
                                           unsigned int *parent_type)
 {
-       *parent_hwirq = child_hwirq + 0xc0;
+       struct pmic_gpio_state *state = gpiochip_get_data(chip);
+
+       *parent_hwirq = child_hwirq + state->pid_base;
        *parent_type = child_type;
 
        return 0;
 }
 
+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+                                            unsigned int parent_hwirq,
+                                            unsigned int parent_type)
+{
+       struct pmic_gpio_state *state = gpiochip_get_data(chip);
+       struct irq_fwspec *fwspec;
+
+       fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+       if (!fwspec)
+               return NULL;
+
+       fwspec->fwnode = chip->irq.parent_domain->fwnode;
+
+       fwspec->param_count = 4;
+       fwspec->param[0] = state->usid;
+       fwspec->param[1] = parent_hwirq;
+       /* param[2] must be left as 0 */
+       fwspec->param[3] = parent_type;
+
+       return fwspec;
+}
+
 static int pmic_gpio_probe(struct platform_device *pdev)
 {
        struct irq_domain *parent_domain;
@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        struct pmic_gpio_pad *pad, *pads;
        struct pmic_gpio_state *state;
        struct gpio_irq_chip *girq;
+       const struct spmi_device *parent_spmi_dev;
        int ret, npins, i;
        u32 reg;
 
@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 
        state->dev = &pdev->dev;
        state->map = dev_get_regmap(dev->parent, NULL);
+       parent_spmi_dev = to_spmi_device(dev->parent);
+       state->usid = parent_spmi_dev->usid;
+       state->pid_base = reg >> 8;
 
        pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
        if (!pindesc)
@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        girq->fwnode = of_node_to_fwnode(state->dev->of_node);
        girq->parent_domain = parent_domain;
        girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
-       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+       girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
        girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
        girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
 
index 68b3886f9f0f3cdc756020cd61cfd261dc573b44..dfd8888a222a46068068a100cf709d985b22f901 100644 (file)
@@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
        struct stm32_pinctrl_group *g = pctl->groups;
        int i;
 
-       for (i = g->pin; i < g->pin + pctl->ngroups; i++)
-               stm32_pinctrl_restore_gpio_regs(pctl, i);
+       for (i = 0; i < pctl->ngroups; i++, g++)
+               stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
 
        return 0;
 }
index 7646708d57e42bdc0ae57d2f0cdbf41bcc1a2fc2..a916cd89cbbeddf99272f26012a790d43ce08f7b 100644 (file)
@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
                        if (ret)
                                goto access_error;
 
-                       *regval |= rol32(val, regsize * i);
+                       *regval |= rol32(val, regsize * i * 8);
                }
        }
 
@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        /* Convert buffer to input value. */
-       ret = kstrtou32(buf, len, &input_val);
+       ret = kstrtou32(buf, 0, &input_val);
        if (ret)
                return ret;
 
index d6a7c896ac8661c166368a7326c0e410dbc04f5d..fc95620101e85bf21b1201fb897c5098b3066ffd 100644 (file)
@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0006", 0},
        {"AMDI0007", 0},
        {"AMD0004", 0},
+       {"AMD0005", 0},
        { }
 };
 MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
index 42513eab1d064c7038d21b3105cdba67ae7d1a39..2fffa57e596e4191c68301e5244518bd2474b759 100644 (file)
@@ -167,6 +167,7 @@ config DELL_WMI
 config DELL_WMI_PRIVACY
        bool "Dell WMI Hardware Privacy Support"
        depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
+       depends on DELL_WMI
        help
          This option adds integration with the "Dell Hardware Privacy"
          feature of Dell laptops to the dell-wmi driver.
index d53634c8a6e09952bb70ddc0fdb99be5b71fcda5..658bab4b79648b7cdbcb9455c6053e2c322b0c8f 100644 (file)
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
index 379560fe5df96db2fad30d809e2d94db6730e75d..e03943e6380a6dfec2438c379d27674f0d69293b 100644 (file)
@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)
 
        if (config->device_mode_info &&
            context->sar_data.device_mode < config->total_dev_mode) {
-               struct wwan_device_mode_info *dev_mode =
-                       &config->device_mode_info[context->sar_data.device_mode];
-
-               context->sar_data.antennatable_index = dev_mode->antennatable_index;
-               context->sar_data.bandtable_index = dev_mode->bandtable_index;
-               context->sar_data.sartable_index = dev_mode->sartable_index;
+               int itr = 0;
+
+               for (itr = 0; itr < config->total_dev_mode; itr++) {
+                       if (context->sar_data.device_mode ==
+                               config->device_mode_info[itr].device_mode) {
+                               struct wwan_device_mode_info *dev_mode =
+                               &config->device_mode_info[itr];
+
+                               context->sar_data.antennatable_index = dev_mode->antennatable_index;
+                               context->sar_data.bandtable_index = dev_mode->bandtable_index;
+                               context->sar_data.sartable_index = dev_mode->sartable_index;
+                               break;
+                       }
+               }
        }
 }
 
@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
        .remove = sar_remove,
        .driver = {
                .name = DRVNAME,
-               .owner = THIS_MODULE,
                .acpi_match_table = ACPI_PTR(sar_device_ids)
        }
 };
@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
-MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
index 9fe0a2527e1cd2c5d449d7f2468c9c7b81177b61..e59d79c7e82f866f665bdcd6932f48a01ee7a878 100644 (file)
@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
 
        gpiod_remove_lookup_table(&int3472->gpios);
 
-       if (int3472->clock.ena_gpio)
+       if (int3472->clock.cl)
                skl_int3472_unregister_clock(int3472);
 
        gpiod_put(int3472->clock.ena_gpio);
index bfa0cc20750d3621996142917f62c6cf7dee48f2..7cc9089d1e14f4c16e332321ecf24327aaeb9268 100644 (file)
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
 #define IPC_READ_BUFFER                0x90
 
 /* Timeout in jiffies */
-#define IPC_TIMEOUT            (5 * HZ)
+#define IPC_TIMEOUT            (10 * HZ)
 
 static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait till scu status is busy */
 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
+       unsigned long end = jiffies + IPC_TIMEOUT;
 
        do {
                u32 status;
@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
        return -ETIMEDOUT;
 }
 
-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
+/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
 static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
 {
        int status;
index 4dfc52e06704117da71d836d3fc6a18f7ec4ae4f..f9b2d66b04433e749a695fcbc639166300073a3a 100644 (file)
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
        struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
        ptp_cleanup_pin_groups(ptp);
+       kfree(ptp->vclock_index);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        /* Create a posix clock and link it to the device. */
        err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
+               if (ptp->pps_source)
+                       pps_unregister_source(ptp->pps_source);
+
+               if (ptp->kworker)
+                       kthread_destroy_worker(ptp->kworker);
+
+               put_device(&ptp->dev);
+
                pr_err("failed to create posix clock\n");
-               goto no_clock;
+               return ERR_PTR(err);
        }
 
        return ptp;
 
-no_clock:
-       if (ptp->pps_source)
-               pps_unregister_source(ptp->pps_source);
 no_pps:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);
 
-       kfree(ptp->vclock_index);
-
        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
index 3dd519dfc473c7719da1e7f35f532ac548012473..4991054a213506601a24a515418223ee278dd448 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,16 +26,15 @@ int kvm_arch_ptp_init(void)
                return -ENODEV;
 
        clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-       hv_clock = pvclock_get_pvti_cpu0_va();
-       if (!hv_clock)
+       if (!pvclock_get_pvti_cpu0_va())
                return -ENODEV;
 
        ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
                             KVM_CLOCK_PAIRING_WALLCLOCK);
-       if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+       if (ret == -KVM_ENOSYS)
                return -ENODEV;
 
-       return 0;
+       return ret;
 }
 
 int kvm_arch_ptp_get_clock(struct timespec64 *ts)
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
        struct pvclock_vcpu_time_info *src;
        unsigned int version;
        long ret;
-       int cpu;
 
-       cpu = smp_processor_id();
-       src = &hv_clock[cpu].pvti;
+       src = this_cpu_pvti();
 
        do {
                /*
index a17e8cc642c5f2ca5b1c43ef4b142f2bbd566f2c..8070f3fd98f01d0de87f6dfc87ac87fde7d28446 100644 (file)
@@ -644,6 +644,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
         },
        {0}
 };
+MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
 
 static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
 
index be799a5abf8a6edcd1f1f6d754f695ebcd3a4a97..b0056ae5d463aedd5d3de68f720afdaf47eafc79 100644 (file)
@@ -147,8 +147,8 @@ config RESET_OXNAS
        bool
 
 config RESET_PISTACHIO
-       bool "Pistachio Reset Driver" if COMPILE_TEST
-       default MACH_PISTACHIO
+       bool "Pistachio Reset Driver"
+       depends on MIPS || COMPILE_TEST
        help
          This enables the reset driver for ImgTec Pistachio SoCs.
 
index b6f074d6a65f8c34686712c96a150a32dc9b3386..433fa0c40e477f8770229aeeb48c7c90a63c222e 100644 (file)
@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
        }
 
        ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
-                                !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+                                (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
        if (ret) {
                dev_err(data->dev, "time out on SATA/PCIe rescal\n");
                return ret;
index 2a72f861f79839ec9ec8f220ec8de4a42dffaa91..8c6492e5693c738f648e0f4cf39a4da8d51a6512 100644 (file)
@@ -92,3 +92,29 @@ void __init socfpga_reset_init(void)
        for_each_matching_node(np, socfpga_early_reset_dt_ids)
                a10_reset_init(np);
 }
+
+/*
+ * The early driver is problematic, because it doesn't register
+ * itself as a driver. This causes certain device links to prevent
+ * consumer devices from probing. The hacky solution is to register
+ * an empty driver, whose only job is to attach itself to the reset
+ * manager and call probe.
+ */
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+       { .compatible = "altr,rst-mgr", },
+       { /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static struct platform_driver reset_socfpga_driver = {
+       .probe  = reset_simple_probe,
+       .driver = {
+               .name           = "socfpga-reset",
+               .of_match_table = socfpga_reset_dt_ids,
+       },
+};
+builtin_platform_driver(reset_socfpga_driver);
index 24d3395964cc4ba2d3934a32299fef3f667cd45f..4c5bba52b10593890c9a95ccea929148db9cdc5f 100644 (file)
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
+       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       return tegra_bpmp_transfer(bpmp, &msg);
+       err = tegra_bpmp_transfer(bpmp, &msg);
+       if (err)
+               return err;
+       if (msg.rx.ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index f3c656975e054ec5ce0d2ec95ab7ee7d9aae3feb..93695d535380bcc08442c31376d14c4c5084e508 100644 (file)
@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
 
        if (strcmp("free", parm) == 0) {
                rc = blacklist_parse_parameters(buf, free, 0);
-               /* There could be subchannels without proper devices connected.
-                * evaluate all the entries
+               /*
+                * Evaluate the subchannels without an online device. This way,
+                * no path-verification will be triggered on those subchannels
+                * and it avoids unnecessary delays.
                 */
-               css_schedule_eval_all();
+               css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
        } else if (strcmp("add", parm) == 0)
                rc = blacklist_parse_parameters(buf, add, 0);
        else if (strcmp("purge", parm) == 0)
index 3377097e65de68a0f3c9c9422846fde18e444452..44461928aab8afd3b3042a31c2640d39dec931ee 100644 (file)
@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
        return 0;
 }
 
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+       struct idset *set = data;
+       struct subchannel *sch = to_subchannel(dev);
+       struct ccw_device *cdev = sch_get_cdev(sch);
+
+       if (cdev && cdev->online)
+               idset_sch_del(set, sch->schid);
+
+       return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 {
        unsigned long flags;
-       struct idset *unreg_set;
+       struct idset *set;
 
        /* Find unregistered subchannels. */
-       unreg_set = idset_sch_new();
-       if (!unreg_set) {
+       set = idset_sch_new();
+       if (!set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
-       idset_fill(unreg_set);
-       bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+       idset_fill(set);
+       switch (cond) {
+       case CSS_EVAL_UNREG:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+               break;
+       case CSS_EVAL_NOT_ONLINE:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+               break;
+       default:
+               break;
+       }
+
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
-       idset_add_set(slow_subchannel_set, unreg_set);
+       idset_add_set(slow_subchannel_set, set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-       idset_free(unreg_set);
+       idset_free(set);
 }
 
 void css_wait_for_slow_path(void)
@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
 void css_schedule_reprobe(void)
 {
        /* Schedule with a delay to allow merging of subsequent calls. */
-       css_schedule_eval_all_unreg(1 * HZ);
+       css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
index c98522cbe276f13019f990e03ab015a039e46747..ede0b905bc6ff7c85c592287889c345ce69d3d15 100644 (file)
 #define SNID_STATE3_MULTI_PATH    1
 #define SNID_STATE3_SINGLE_PATH           0
 
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+       CSS_EVAL_UNREG,         /* unregistered subchannels */
+       CSS_EVAL_NOT_ONLINE     /* sch without an online-device */
+};
+
 struct path_state {
        __u8  state1 : 2;       /* path state value 1 */
        __u8  state2 : 2;       /* path state value 2 */
@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
index 118939a7729a1e46176e440a9975cc92642937da..623d5269a52ce595fdbeac56d845a917beb7a54b 100644 (file)
@@ -361,6 +361,7 @@ err_list:
        mutex_lock(&matrix_dev->lock);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->lock);
+       vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
 err_dec_available:
        atomic_inc(&matrix_dev->available_instances);
@@ -376,9 +377,10 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
        mutex_lock(&matrix_dev->lock);
        vfio_ap_mdev_reset_queues(matrix_mdev);
        list_del(&matrix_mdev->node);
+       mutex_unlock(&matrix_dev->lock);
+       vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
        atomic_inc(&matrix_dev->available_instances);
-       mutex_unlock(&matrix_dev->lock);
 }
 
 static ssize_t name_show(struct mdev_type *mtype,
index b4cb5fb199986b767c7fc4dd869b84694e7deba5..0cc62c1b082544d1b644bc1f4a6c33c7c665f0c9 100644 (file)
@@ -1776,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
        host->scsi.disconnectable = 0;
        if (host->SCpnt->device->id  == host->scsi.reconnected.target &&
            host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-           scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
+           scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
            DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
                    host->host->host_no, acornscsi_target(host)));
index 390b07bf92b9794d0727de8e942c4d5e9eba3834..ccbded3353bd0ea0b8d239b73608fbb075508c2a 100644 (file)
@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
 MODULE_VERSION(CSIO_DRV_VERSION);
 MODULE_FIRMWARE(FW_FNAME_T5);
 MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");
index 40fb3a724c76d019ce14dc58e6679beca080300c..cf2e41dd354cbc83940ed81382b6ec79e1a2dc44 100644 (file)
@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
        struct efct *efct;
        struct efct_xport *xport;
        struct efct_io *io;
-       unsigned long flags = 0;
+       unsigned long flags;
 
        efct = node->efct;
 
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
        if (!io) {
                efc_log_err(efct, "IO alloc Failed\n");
                atomic_add_return(1, &xport->io_alloc_failed_count);
-               spin_unlock_irqrestore(&node->active_ios_lock, flags);
                return NULL;
        }
 
index 3f6f14f0cafb3e50e94ff0a5f9e38f6842dffd86..24b72ee4246fb66ce89b0a1ce98601fef77b8dd9 100644 (file)
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
                goto fail;
        }
 
-       shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+       /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+       shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
                                   shost->can_queue);
 
        error = scsi_init_sense_cache(shost);
index 4683c183e9d411e07a083733c4dd1a7e7b4ed0f1..5bc91d34df634db65d06cf03ab2250aef2940038 100644 (file)
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                return FAILED;
        }
 
-       conn = session->leadconn;
-       iscsi_get_conn(conn->cls_conn);
-       conn->eh_abort_cnt++;
-       age = session->age;
-
        spin_lock(&session->back_lock);
        task = (struct iscsi_task *)sc->SCp.ptr;
        if (!task || !task->sc) {
@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
 
                spin_unlock(&session->back_lock);
-               goto success;
+               spin_unlock_bh(&session->frwd_lock);
+               mutex_unlock(&session->eh_mutex);
+               return SUCCESS;
        }
+
+       conn = session->leadconn;
+       iscsi_get_conn(conn->cls_conn);
+       conn->eh_abort_cnt++;
+       age = session->age;
+
        ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
        __iscsi_get_task(task);
        spin_unlock(&session->back_lock);
index 78ce38d7251c595d797502c5625eab42af2faedf..026a1196a54d5e109a478a7631e6285f5b9a5e0c 100644 (file)
@@ -12292,12 +12292,12 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = NULL;
        IOCB_t *irsp = &rspiocb->iocb;
 
        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "0139 Ignoring ELS cmd tag x%x completion Data: "
+                       "0139 Ignoring ELS cmd code x%x completion Data: "
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
         * if exchange is busy.
         */
-       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+               ndlp = cmdiocb->context_un.ndlp;
                lpfc_ct_free_iocb(phba, cmdiocb);
-       else
+       } else {
+               ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
                lpfc_els_free_iocb(phba, cmdiocb);
+       }
 
        lpfc_nlp_put(ndlp);
 }
index 2197988333fe5c170bfb2ae7cf7488c0f01e8633..3cae8803383b6cbb9970dc8e541646fb1f6f0bf9 100644 (file)
@@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->max_lun = -1;
        shost->unique_id = mrioc->id;
 
-       shost->max_channel = 1;
+       shost->max_channel = 0;
        shost->max_id = 0xFFFFFFFF;
 
        if (prot_mask >= 0)
index 4b5d28d89d6906462364f6c8207937dce43e66c2..655cf5de604b8711065e32e8f0ecac1378e4160f 100644 (file)
@@ -431,7 +431,7 @@ done_unmap_sg:
        goto done_free_fcport;
 
 done_free_fcport:
-       if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+       if (bsg_request->msgcode != FC_BSG_RPT_ELS)
                qla2x00_free_fcport(fcport);
 done:
        return rval;
index ece60267b971e89b803681a05f6ae634ab570cb0..b26f2699adb2702394cdafa862113a56ee591c4d 100644 (file)
@@ -2634,7 +2634,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        if (unlikely(logit))
-               ql_log(ql_log_warn, fcport->vha, 0x5060,
+               ql_log(ql_dbg_io, fcport->vha, 0x5060,
                   "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
                   sp->name, sp->handle, comp_status,
                   fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3491,7 +3491,7 @@ check_scsi_status:
 
 out:
        if (logit)
-               ql_log(ql_log_warn, fcport->vha, 0x3022,
+               ql_log(ql_dbg_io, fcport->vha, 0x3022,
                       "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
                       comp_status, scsi_status, res, vha->host_no,
                       cp->device->id, cp->device->lun, fcport->d_id.b.domain,
index d2e40aaba734de9e47528c1853cd343634fe4f24..836fedcea241bc0eca8ff181f2cfac1ef8ac8514 100644 (file)
@@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                        ql_dbg_pci(ql_dbg_init, ha->pdev,
                                            0xe0ee, "%s: failed alloc dsd\n",
                                            __func__);
-                                       return 1;
+                                       return -ENOMEM;
                                }
                                ha->dif_bundle_kallocs++;
 
index b3478ed9b12e8076fe333555c1e499a7b08cd8c9..7d8242c120fc77ab08073876fa6f5d8a078c6979 100644 (file)
@@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
                        vha->flags.online, qla2x00_reset_active(vha),
                        cmd->reset_count, qpair->chip_reset);
-               spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-               return 0;
+               goto out_unmap_unlock;
        }
 
        /* Does F/W have an IOCBs for this request */
@@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        prm.sg = NULL;
        prm.req_cnt = 1;
 
-       /* Calculate number of entries and segments required */
-       if (qlt_pci_map_calc_cnt(&prm) != 0)
-               return -EAGAIN;
-
        if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                /*
@@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                return 0;
        }
 
+       /* Calculate number of entries and segments required */
+       if (qlt_pci_map_calc_cnt(&prm) != 0)
+               return -EAGAIN;
+
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        /* Does F/W have an IOCBs for this request */
        res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 
        BUG_ON(cmd->cmd_in_wq);
 
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(cmd->vha, cmd);
-
        if (!cmd->q_full)
                qlt_decr_num_pend_cmds(cmd->vha);
 
index b241f9e3885c694823271380dc791cc60c1a7b4d..291ecc33b1fe64833314bcf131c0546280d36949 100644 (file)
@@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-       module_put(sdev->host->hostt->module);
+       struct module *mod = sdev->host->hostt->module;
+
        put_device(&sdev->sdev_gendev);
+       module_put(mod);
 }
 EXPORT_SYMBOL(scsi_device_put);
 
index 86793259e541aa9d705ceefb1d090c408748ff30..a35841b34bfd9c8ecfdd5e7c24cdbe864b22c9cc 100644 (file)
@@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
        struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
        struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
        unsigned long flags;
+       struct module *mod;
 
        sdev = container_of(work, struct scsi_device, ew.work);
 
+       mod = sdev->host->hostt->module;
+
        scsi_dh_release_device(sdev);
 
        parent = sdev->sdev_gendev.parent;
@@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 
        if (parent)
                put_device(parent);
+       module_put(mod);
 }
 
 static void scsi_device_dev_release(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
+
+       /* Set module pointer as NULL in case of module unloading */
+       if (!try_module_get(sdp->host->hostt->module))
+               sdp->host->hostt->module = NULL;
+
        execute_in_process_context(scsi_device_dev_release_usercontext,
                                   &sdp->ew);
 }
index 922e4c7bd88e4d9a5ff80e61c09cbabe68cb4f49..78343d3f938573226c3f394009f57165db18c56b 100644 (file)
@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                        session->recovery_tmo = value;
                break;
        default:
-               err = transport->set_param(conn, ev->u.set_param.param,
-                                          data, ev->u.set_param.len);
                if ((conn->state == ISCSI_CONN_BOUND) ||
                        (conn->state == ISCSI_CONN_UP)) {
                        err = transport->set_param(conn, ev->u.set_param.param,
index 523bf2fdc25321e3444e04b6ab4f33fffb14041c..fce63335084ed5e0121e3e3508acaa35a5af1d31 100644 (file)
@@ -3683,7 +3683,12 @@ static int sd_resume(struct device *dev)
 static int sd_resume_runtime(struct device *dev)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
-       struct scsi_device *sdp = sdkp->device;
+       struct scsi_device *sdp;
+
+       if (!sdkp)      /* E.g.: runtime resume at the start of sd_probe() */
+               return 0;
+
+       sdp = sdkp->device;
 
        if (sdp->ignore_media_change) {
                /* clear the device's sense data */
index 43e682297fd5f9937888850782a914ded207730f..0a1734f34587dd4dc9e8b78468b05d37336e86b5 100644 (file)
@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
                         void *buf, int bufflen)
 {
-       u32 result;
+       int result;
 
        unsigned char cmd[] = {
                SEND_DIAGNOSTIC,
index ebbbc1299c625ba9ea24a0af2bd7bf7b92db1eee..9eb1b88a29dde3c53d294ab6ba846271596944b9 100644 (file)
@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
        foreach_vmbus_pkt(desc, channel) {
                struct vstor_packet *packet = hv_pkt_data(desc);
                struct storvsc_cmd_request *request = NULL;
+               u32 pktlen = hv_pkt_datalen(desc);
                u64 rqst_id = desc->trans_id;
+               u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
+                       stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
 
-               if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
-                               stor_device->vmscsi_size_delta) {
-                       dev_err(&device->device, "Invalid packet len\n");
+               if (pktlen < minlen) {
+                       dev_err(&device->device,
+                               "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+                               rqst_id, pktlen, minlen);
                        continue;
                }
 
@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
                        if (rqst_id == 0) {
                                /*
                                 * storvsc_on_receive() looks at the vstor_packet in the message
-                                * from the ring buffer.  If the operation in the vstor_packet is
-                                * COMPLETE_IO, then we call storvsc_on_io_completion(), and
-                                * dereference the guest memory address.  Make sure we don't call
-                                * storvsc_on_io_completion() with a guest memory address that is
-                                * zero if Hyper-V were to construct and send such a bogus packet.
+                                * from the ring buffer.
+                                *
+                                * - If the operation in the vstor_packet is COMPLETE_IO, then
+                                *   we call storvsc_on_io_completion(), and dereference the
+                                *   guest memory address.  Make sure we don't call
+                                *   storvsc_on_io_completion() with a guest memory address
+                                *   that is zero if Hyper-V were to construct and send such
+                                *   a bogus packet.
+                                *
+                                * - If the operation in the vstor_packet is FCHBA_DATA, then
+                                *   we call cache_wwn(), and access the data payload area of
+                                *   the packet (wwn_packet); however, there is no guarantee
+                                *   that the packet is big enough to contain such area.
+                                *   Future-proof the code by rejecting such a bogus packet.
                                 */
-                               if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+                               if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+                                   packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
                                        dev_err(&device->device, "Invalid packet with ID of 0\n");
                                        continue;
                                }
index 149c1aa0910357765935e58835ac3f3085ff9644..51424557810dab378a5178d2f7beda751fa12a94 100644 (file)
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
 
 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-       /*
-        * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
-        * address registers must be restored because the restore kernel can
-        * have used different addresses.
-        */
-       ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
-                     REG_UTP_TRANSFER_REQ_LIST_BASE_L);
-       ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
-                     REG_UTP_TRANSFER_REQ_LIST_BASE_H);
-       ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
-                     REG_UTP_TASK_REQ_LIST_BASE_L);
-       ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
-                     REG_UTP_TASK_REQ_LIST_BASE_H);
-
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);
 
@@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       /* Force a full reset and restore */
+       ufshcd_set_link_off(hba);
+
+       return ufshcd_system_resume(dev);
+}
+#endif
+
 /**
  * ufshcd_pci_shutdown - main function to put the controller in reset state
  * @pdev: pointer to PCI device handle
@@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 #ifdef CONFIG_PM_SLEEP
+       .suspend        = ufshcd_system_suspend,
+       .resume         = ufshcd_system_resume,
+       .freeze         = ufshcd_system_suspend,
+       .thaw           = ufshcd_system_resume,
+       .poweroff       = ufshcd_system_suspend,
+       .restore        = ufshcd_pci_restore,
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
 #endif
index 029c9631ec2bf00ae252c5ae42e134bf3e53fe12..95be7ecdfe10b8c00d166535e2ec54bfe4c4f72e 100644 (file)
@@ -318,8 +318,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                                     enum ufs_trace_str_t str_t)
 {
-       int off = (int)tag - hba->nutrs;
-       struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
+       struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
 
        if (!trace_ufshcd_upiu_enabled())
                return;
@@ -6378,27 +6377,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
        return retval;
 }
 
-struct ctm_info {
-       struct ufs_hba  *hba;
-       unsigned long   pending;
-       unsigned int    ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-       struct ctm_info *const ci = priv;
-       struct completion *c;
-
-       WARN_ON_ONCE(reserved);
-       if (test_bit(req->tag, &ci->pending))
-               return true;
-       ci->ncpl++;
-       c = req->end_io_data;
-       if (c)
-               complete(c);
-       return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6409,18 +6387,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-       unsigned long flags;
-       struct request_queue *q = hba->tmf_queue;
-       struct ctm_info ci = {
-               .hba     = hba,
-       };
+       unsigned long flags, pending, issued;
+       irqreturn_t ret = IRQ_NONE;
+       int tag;
+
+       pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-       blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+       issued = hba->outstanding_tasks & ~pending;
+       for_each_set_bit(tag, &issued, hba->nutmrs) {
+               struct request *req = hba->tmf_rqs[tag];
+               struct completion *c = req->end_io_data;
+
+               complete(c);
+               ret = IRQ_HANDLED;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+       return ret;
 }
 
 /**
@@ -6543,9 +6527,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       blk_mq_start_request(req);
 
        task_tag = req->tag;
+       hba->tmf_rqs[req->tag] = req;
        treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
        memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6586,6 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->tmf_rqs[req->tag] = NULL;
        __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -9636,6 +9621,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
+       hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+                                   sizeof(*hba->tmf_rqs), GFP_KERNEL);
+       if (!hba->tmf_rqs) {
+               err = -ENOMEM;
+               goto free_tmf_queue;
+       }
 
        /* Reset the attached device */
        ufshcd_device_reset(hba);
index f0da5d3db1fa70e7af38ad40fab4ab914a4f2506..41f6e06f91856469113e2e14f5e4639c935114df 100644 (file)
@@ -828,6 +828,7 @@ struct ufs_hba {
 
        struct blk_mq_tag_set tmf_tag_set;
        struct request_queue *tmf_queue;
+       struct request **tmf_rqs;
 
        struct uic_command *active_uic_cmd;
        struct mutex uic_cmd_mutex;
index c25ce8f0e0afc68341315ac04899f69b30c9db91..07d0250f17c3a4a16a28ea5d930ac1df058bc6bf 100644 (file)
@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                }
                break;
        default:
-               pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+               pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
        }
 }
 
@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
                virtscsi_handle_param_change(vscsi, event);
                break;
        default:
-               pr_err("Unsupport virtio scsi event %x\n", event->event);
+               pr_err("Unsupported virtio scsi event %x\n", event->event);
        }
        virtscsi_kick_event(vscsi, event_node);
 }
index 8179b69518b43904e79d64c16526ce480dfa3718..853096b7e84c39e6241250b12b28528e83d5804f 100644 (file)
@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
        depends on RISCV && SOC_CANAAN && OF
        default SOC_CANAAN
         select PM
-        select SIMPLE_PM_BUS
         select SYSCON
         select MFD_SYSCON
        help
index bda170d7b4a20ba40961e83c68ba3cde17b07a08..72fc2b539213500df590082d6d5973a8df8755ab 100644 (file)
@@ -98,7 +98,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
        if (ehdr->e_phnum < 2)
                return ERR_PTR(-EINVAL);
 
-       if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
+       if (phdrs[0].p_type == PT_LOAD)
                return ERR_PTR(-EINVAL);
 
        if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
index 9faf48302f4bc012fd3ea805ea309b71bff943d6..52e581167115599127c8b8b89395bb5a7699c66f 100644 (file)
@@ -628,7 +628,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
        /* Feed the soc specific unique data into entropy pool */
        add_device_randomness(info, item_size);
 
-       platform_set_drvdata(pdev, qs->soc_dev);
+       platform_set_drvdata(pdev, qs);
 
        return 0;
 }
index ea64e187854eb8579a5a1ff1e1eaf9a6aeeaedd8..f32e1cbbe8c52f729925cb05baff6ee8d99b80ce 100644 (file)
@@ -825,25 +825,28 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
        writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
        spin_unlock_irqrestore(&reset->lock, flags);
 
-       if (!has_rstst)
-               goto exit;
+       /* wait for the reset bit to clear */
+       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+                                               reset->prm->data->rstctrl,
+                                               v, !(v & BIT(id)), 1,
+                                               OMAP_RESET_MAX_WAIT);
+       if (ret)
+               pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+                      reset->prm->data->name, id);
 
        /* wait for the status to be set */
-       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+       if (has_rstst) {
+               ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
                                                 reset->prm->data->rstst,
                                                 v, v & BIT(st_bit), 1,
                                                 OMAP_RESET_MAX_WAIT);
-       if (ret)
-               pr_err("%s: timedout waiting for %s:%lu\n", __func__,
-                      reset->prm->data->name, id);
+               if (ret)
+                       pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+                              reset->prm->data->name, id);
+       }
 
-exit:
-       if (reset->clkdm) {
-               /* At least dra7 iva needs a delay before clkdm idle */
-               if (has_rstst)
-                       udelay(1);
+       if (reset->clkdm)
                pdata->clkdm_allow_idle(reset->clkdm);
-       }
 
        return ret;
 }
index 713292b0c71ea74889882375e7eff373dffb353b..3226c4e1c7c0dd29e6a7f3e36db18f4cd401b9a8 100644 (file)
@@ -1194,7 +1194,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra_slink_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
index 8e085dda0c18d29ca62093b0d65add503f008285..712e01c37870c2d2b76530be1c928cedac290ddc 100644 (file)
@@ -1646,6 +1646,8 @@ static input_system_err_t input_system_configure_channel_sensor(
        default:
                return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
        }
+
+       return INPUT_SYSTEM_ERR_NO_ERROR;
 }
 
 // Test flags and set structure.
index 8a2edd67f2c6fc8031dd0a61784bd7a378ae8601..20e5081588719bc6fad73d6873c33dd6f279315c 100644 (file)
@@ -919,7 +919,7 @@ static int hantro_probe(struct platform_device *pdev)
                if (!vpu->variant->irqs[i].handler)
                        continue;
 
-               if (vpu->variant->num_clocks > 1) {
+               if (vpu->variant->num_irqs > 1) {
                        irq_name = vpu->variant->irqs[i].name;
                        irq = platform_get_irq_byname(vpu->pdev, irq_name);
                } else {
index c589fe9dae701a2582e6a05f42d26edfe7db88a8..825af5fd35e0fc0bd7008a68ee9622cf65ef6c7b 100644 (file)
@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
                sizeimage = bytesperline * height;
 
                /* Chroma plane size. */
-               sizeimage += bytesperline * height / 2;
+               sizeimage += bytesperline * ALIGN(height, 64) / 2;
 
                break;
 
index a6d589e89aeb01f171829aa180f0a34f97052baf..f27eba72d646f116d8e6eaf364ed5b7fd8da5391 100644 (file)
@@ -248,7 +248,7 @@ void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
 #ifdef CONFIG_88EU_AP_MODE
                struct sta_info *psta = NULL;
                struct sta_priv *pstapriv = &adapt->stapriv;
-               if ((mac_id - 1) > 0)
+               if (mac_id >= 2)
                        psta = pstapriv->sta_aid[(mac_id - 1) - 1];
                if (psta)
                        add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
index b25369a134525e0c1bf03c45a00fe3b82f964824..967f10b9582a826c8d0101e273f710a1e46bba71 100644 (file)
@@ -182,7 +182,7 @@ create_pagelist(char *buf, char __user *ubuf,
                offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
        num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
 
-       if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+       if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
                         sizeof(struct vchiq_pagelist_info)) /
                        (sizeof(u32) + sizeof(pages[0]) +
                         sizeof(struct scatterlist)))
index 5ce13b099d7dc8fb7f665ff869f0c3f7cf023359..5363ebebfc357f480515a019e769091f095b69bd 100644 (file)
@@ -585,6 +585,9 @@ static int optee_remove(struct platform_device *pdev)
 {
        struct optee *optee = platform_get_drvdata(pdev);
 
+       /* Unregister OP-TEE specific client devices on TEE bus */
+       optee_unregister_devices();
+
        /*
         * Ask OP-TEE to free all cached shared memory objects to decrease
         * reference counters and also avoid wild pointers in secure world
index ec1d24693ebaae10d89874fc67ead117e961daa8..128a2d2a50a16c540d42bdc5dc1c5fdb84bd328a 100644 (file)
@@ -53,6 +53,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
        return 0;
 }
 
+static void optee_release_device(struct device *dev)
+{
+       struct tee_client_device *optee_device = to_tee_client_device(dev);
+
+       kfree(optee_device);
+}
+
 static int optee_register_device(const uuid_t *device_uuid)
 {
        struct tee_client_device *optee_device = NULL;
@@ -63,6 +70,7 @@ static int optee_register_device(const uuid_t *device_uuid)
                return -ENOMEM;
 
        optee_device->dev.bus = &tee_bus_type;
+       optee_device->dev.release = optee_release_device;
        if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
                kfree(optee_device);
                return -ENOMEM;
@@ -154,3 +162,17 @@ int optee_enumerate_devices(u32 func)
 {
        return  __optee_enumerate_devices(func);
 }
+
+static int __optee_unregister_device(struct device *dev, void *data)
+{
+       if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta")))
+               device_unregister(dev);
+
+       return 0;
+}
+
+void optee_unregister_devices(void)
+{
+       bus_for_each_dev(&tee_bus_type, NULL, NULL,
+                        __optee_unregister_device);
+}
index dbdd367be1568330aa5481f9924932196282d5d4..f6bb4a763ba94e80f0463cc42957702ef47f1b93 100644 (file)
@@ -184,6 +184,7 @@ void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
 #define PTA_CMD_GET_DEVICES            0x0
 #define PTA_CMD_GET_DEVICES_SUPP       0x1
 int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
 
 /*
  * Small helpers
index c41a9a501a6e9d8fcf5efce43259aca6466368b6..d167039af519eb904d9bc432e57e2e20b7fa9a57 100644 (file)
@@ -35,7 +35,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;
 
-               pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+               pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages) {
                        rc = -ENOMEM;
                        goto err;
index da19d7987d0057a83e7137c2eebaf317316a1108..78fd365893c13f284a7de60b399edb0da0a40aec 100644 (file)
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
 thunderbolt-${CONFIG_ACPI} += acpi.o
 thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
 thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
+CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
 
 thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
 obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
index 8f143c09a1696eeed94e1d4788f06bce1d089d9a..f0bf01ea069ae0a217728987dd5237f8b4b6dd38 100644 (file)
@@ -618,10 +618,8 @@ static int __init xenboot_console_setup(struct console *console, char *string)
 {
        static struct xencons_info xenboot;
 
-       if (xen_initial_domain())
+       if (xen_initial_domain() || !xen_pv_domain())
                return 0;
-       if (!xen_pv_domain())
-               return -ENODEV;
 
        return xencons_info_pv_init(&xenboot, 0);
 }
@@ -632,17 +630,16 @@ static void xenboot_write_console(struct console *console, const char *string,
        unsigned int linelen, off = 0;
        const char *pos;
 
+       if (dom0_write_console(0, string, len) >= 0)
+               return;
+
        if (!xen_pv_domain()) {
                xen_hvm_early_write(0, string, len);
                return;
        }
 
-       dom0_write_console(0, string, len);
-
-       if (xen_initial_domain())
+       if (domU_write_console(0, "(early) ", 8) < 0)
                return;
-
-       domU_write_console(0, "(early) ", 8);
        while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
                linelen = pos-string+off;
                if (off + linelen > len)
index 71ae16de0f90e06ff6e6b9cb0a3de2b64ab6bd34..39fc96dc2531c1aedf93c63a7ccab68694bacdd1 100644 (file)
@@ -361,9 +361,13 @@ config SERIAL_8250_BCM2835AUX
          If unsure, say N.
 
 config SERIAL_8250_FSL
-       bool
+       bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
        depends on SERIAL_8250_CONSOLE
-       default PPC || ARM || ARM64 || COMPILE_TEST
+       default PPC || ARM || ARM64
+       help
+         Selecting this option enables a workaround for a break-detection
+         erratum for Freescale 16550 UARTs in the 8250 driver. It also
+         enables support for ACPI enumeration.
 
 config SERIAL_8250_DW
        tristate "Support for Synopsys DesignWare 8250 quirks"
index 8b7bc10b6e8b44a6169d0027b9eb3693b862835b..f1d100671ee6a174ec3637c2d5f7bc7baec01d36 100644 (file)
@@ -420,11 +420,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
                ret = PTR_ERR(data->phy);
-               /* Return -EINVAL if no usbphy is available */
-               if (ret == -ENODEV)
-                       data->phy = NULL;
-               else
-                       goto err_clk;
+               if (ret == -ENODEV) {
+                       data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+                       if (IS_ERR(data->phy)) {
+                               ret = PTR_ERR(data->phy);
+                               if (ret == -ENODEV)
+                                       data->phy = NULL;
+                               else
+                                       goto err_clk;
+                       }
+               }
        }
 
        pdata.usb_phy = data->phy;
index 4e2f1552f4b78386eae859abb2571087490f5750..7b2e2420ecaea7496c29844c4048bd6d4706ab3c 100644 (file)
@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.overrun++;
                spin_unlock_irqrestore(&acm->read_lock, flags);
 
+               if (newctrl & ACM_CTRL_BRK)
+                       tty_flip_buffer_push(&acm->port);
+
                if (difference)
                        wake_up_all(&acm->wioctl);
 
@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
 
 static void acm_process_read_urb(struct acm *acm, struct urb *urb)
 {
+       unsigned long flags;
+
        if (!urb->actual_length)
                return;
 
+       spin_lock_irqsave(&acm->read_lock, flags);
        tty_insert_flip_string(&acm->port, urb->transfer_buffer,
                        urb->actual_length);
+       spin_unlock_irqrestore(&acm->read_lock, flags);
+
        tty_flip_buffer_push(&acm->port);
 }
 
index 35d5908b5478aad688d6ddae66b72c8d3377ab4d..fdf79bcf7eb09e50ae223d60df3348727375fa7e 100644 (file)
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
 };
 
 /* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN_CORE
+#ifdef CONFIG_WWAN
 static int wdm_wwan_port_start(struct wwan_port *port)
 {
        struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
        /* inbuf has been copied, it is safe to check for outstanding data */
        schedule_work(&desc->service_outs_intr);
 }
-#else /* CONFIG_WWAN_CORE */
+#else /* CONFIG_WWAN */
 static void wdm_wwan_init(struct wdm_device *desc) {}
 static void wdm_wwan_deinit(struct wdm_device *desc) {}
 static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN_CORE */
+#endif /* CONFIG_WWAN */
 
 /* --- error handling --- */
 static void wdm_rxwork(struct work_struct *work)
index 5e8a04e3dd3c896ed3a9a4d309d8963202247e74..b856622431a73ef32286c581ab1137dc8bfa50dc 100644 (file)
@@ -6,8 +6,7 @@ config USB_COMMON
 
 config USB_LED_TRIG
        bool "USB LED Triggers"
-       depends on LEDS_CLASS && LEDS_TRIGGERS
-       select USB_COMMON
+       depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
        help
          This option adds LED triggers for USB host and/or gadget activity.
 
index 804b505481633fa1671dcfd486e369a5a6841e45..4519d06c9ca2b5cef2df29ba1da31ea652210d77 100644 (file)
@@ -4243,7 +4243,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        }
 
 
-       usb_initialize_gadget(dwc->sysdev, dwc->gadget, dwc_gadget_release);
+       usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
        dev                             = &dwc->gadget->dev;
        dev->platform_data              = dwc;
        dwc->gadget->ops                = &dwc3_gadget_ops;
index be864560bfeaaace69b716bd32b8e1187d36442f..ef55b8bb5870ac1b9e51ee4e61aaea5143bc4286 100644 (file)
@@ -674,11 +674,17 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
                ssize = uac2_opts->c_ssize;
        }
 
-       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+         // Win10 requires max packet size + 1 frame
                srate = srate * (1000 + uac2_opts->fb_max) / 1000;
-
-       max_size_bw = num_channels(chmask) * ssize *
-               DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+               // updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))));
+       } else {
+               // adding 1 frame provision for Win10
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))) + 1);
+       }
        ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
                                                    max_size_ep));
 
index 0b3722770760ca4c2a59ad1a0ef183148e27bcd5..ded9738392e4c6cdabcc9be8e7c794554ba27342 100644 (file)
 #include <mach/usb.h>
 
 
-/* OMAP-1510 OHCI has its own MMU for DMA */
-#define OMAP1510_LB_MEMSIZE    32      /* Should be same as SDRAM size */
-#define OMAP1510_LB_CLOCK_DIV  0xfffec10c
-#define OMAP1510_LB_MMU_CTL    0xfffec208
-#define OMAP1510_LB_MMU_LCK    0xfffec224
-#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
-#define OMAP1510_LB_MMU_CAM_H  0xfffec22c
-#define OMAP1510_LB_MMU_CAM_L  0xfffec230
-#define OMAP1510_LB_MMU_RAM_H  0xfffec234
-#define OMAP1510_LB_MMU_RAM_L  0xfffec238
-
 #define DRIVER_DESC "OHCI OMAP driver"
 
 struct ohci_omap_priv {
@@ -104,61 +93,6 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
        return 0;
 }
 
-#ifdef CONFIG_ARCH_OMAP15XX
-/*
- * OMAP-1510 specific Local Bus clock on/off
- */
-static int omap_1510_local_bus_power(int on)
-{
-       if (on) {
-               omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
-               udelay(200);
-       } else {
-               omap_writel(0, OMAP1510_LB_MMU_CTL);
-       }
-
-       return 0;
-}
-
-/*
- * OMAP-1510 specific Local Bus initialization
- * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
- *       See also arch/mach-omap/memory.h for __virt_to_dma() and
- *       __dma_to_virt() which need to match with the physical
- *       Local Bus address below.
- */
-static int omap_1510_local_bus_init(void)
-{
-       unsigned int tlb;
-       unsigned long lbaddr, physaddr;
-
-       omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
-              OMAP1510_LB_CLOCK_DIV);
-
-       /* Configure the Local Bus MMU table */
-       for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
-               lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
-               physaddr = tlb * 0x00100000 + PHYS_OFFSET;
-               omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
-               omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
-                      OMAP1510_LB_MMU_CAM_L);
-               omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
-               omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
-               omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
-               omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
-       }
-
-       /* Enable the walking table */
-       omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
-       udelay(200);
-
-       return 0;
-}
-#else
-#define omap_1510_local_bus_power(x)   {}
-#define omap_1510_local_bus_init()     {}
-#endif
-
 #ifdef CONFIG_USB_OTG
 
 static void start_hnp(struct ohci_hcd *ohci)
@@ -229,10 +163,8 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
 
        omap_ohci_clock_power(priv, 1);
 
-       if (cpu_is_omap15xx()) {
-               omap_1510_local_bus_power(1);
-               omap_1510_local_bus_init();
-       }
+       if (config->lb_reset)
+               config->lb_reset();
 
        ret = ohci_setup(hcd);
        if (ret < 0)
index 6e784f2fc26d14f2cbcd02903172335b9d02c809..eb46e642e87aa5fee70a912d970eb1b7264dcbb5 100644 (file)
@@ -408,40 +408,38 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
                return -EBUSY;
 
        xhci_dbc_tty_init_port(dbc, port);
-       tty_dev = tty_port_register_device(&port->port,
-                                          dbc_tty_driver, 0, NULL);
-       if (IS_ERR(tty_dev)) {
-               ret = PTR_ERR(tty_dev);
-               goto register_fail;
-       }
 
        ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
        if (ret)
-               goto buf_alloc_fail;
+               goto err_exit_port;
 
        ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
                                      dbc_read_complete);
        if (ret)
-               goto request_fail;
+               goto err_free_fifo;
 
        ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
                                      dbc_write_complete);
        if (ret)
-               goto request_fail;
+               goto err_free_requests;
+
+       tty_dev = tty_port_register_device(&port->port,
+                                          dbc_tty_driver, 0, NULL);
+       if (IS_ERR(tty_dev)) {
+               ret = PTR_ERR(tty_dev);
+               goto err_free_requests;
+       }
 
        port->registered = true;
 
        return 0;
 
-request_fail:
+err_free_requests:
        xhci_dbc_free_requests(&port->read_pool);
        xhci_dbc_free_requests(&port->write_pool);
+err_free_fifo:
        kfifo_free(&port->write_fifo);
-
-buf_alloc_fail:
-       tty_unregister_device(dbc_tty_driver, 0);
-
-register_fail:
+err_exit_port:
        xhci_dbc_tty_exit_port(port);
 
        dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
index 2c9f25ca8eddf324282cbff84a554b3887f8ee09..2484a9d38ce2b808234d0f4ffafbaa6f59d163ee 100644 (file)
@@ -30,6 +30,7 @@
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009      0x1009
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100      0x1100
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400      0x1400
 
 #define PCI_VENDOR_ID_ETRON            0x1b6f
@@ -113,6 +114,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        /* Look for vendor-specific quirks */
        if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
                        (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+                        pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
                         pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
                if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
                                pdev->revision == 0x0) {
@@ -279,8 +281,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                        pdev->device == 0x3432)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
 
-       if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+       if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
+               xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
+       }
 
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
                pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
index e676749f543baf02287979d014307d8ae323a2a0..311597bba80e2a4d469277042cbde721e93af335 100644 (file)
@@ -366,16 +366,22 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and aquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-       u64 temp_64;
+       u32 temp_32;
        int ret;
 
        xhci_dbg(xhci, "Abort command ring\n");
 
        reinit_completion(&xhci->cmd_ring_stop_completion);
 
-       temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-       xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
-                       &xhci->op_regs->cmd_ring);
+       /*
+        * The control bits like command stop, abort are located in lower
+        * dword of the command ring control register. Limit the write
+        * to the lower dword to avoid corrupting the command ring pointer
+        * in case if the command ring is stopped by the time upper dword
+        * is written.
+        */
+       temp_32 = readl(&xhci->op_regs->cmd_ring);
+       writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
 
        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
@@ -559,8 +565,11 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        struct xhci_command *cmd;
        struct xhci_segment *new_seg;
+       struct xhci_segment *halted_seg = NULL;
        union xhci_trb *new_deq;
        int new_cycle;
+       union xhci_trb *halted_trb;
+       int index = 0;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
@@ -598,7 +607,27 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
-       new_cycle = hw_dequeue & 0x1;
+
+       /*
+        * Quirk: xHC write-back of the DCS field in the hardware dequeue
+        * pointer is wrong - use the cycle state of the TRB pointed to by
+        * the dequeue pointer.
+        */
+       if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
+           !(ep->ep_state & EP_HAS_STREAMS))
+               halted_seg = trb_in_td(xhci, td->start_seg,
+                                      td->first_trb, td->last_trb,
+                                      hw_dequeue & ~0xf, false);
+       if (halted_seg) {
+               index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
+                        sizeof(*halted_trb);
+               halted_trb = &halted_seg->trbs[index];
+               new_cycle = halted_trb->generic.field[3] & 0x1;
+               xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
+                        (u8)(hw_dequeue & 0x1), index, new_cycle);
+       } else {
+               new_cycle = hw_dequeue & 0x1;
+       }
 
        /*
         * We want to find the pointer, segment and cycle state of the new trb
index 575fa89a783f951a5b1e2b4a904348735b983c3f..1bf494b649bd24c834c26acaf984704e91203974 100644 (file)
@@ -1787,7 +1787,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
 static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
 {
        struct device *dev = hub->hcd->self.controller;
@@ -2102,7 +2101,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2144,7 +2143,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2174,10 +2173,8 @@ static int tegra_xusb_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int tegra_xusb_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int ret;
@@ -2190,7 +2187,7 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
        return ret;
 }
 
-static int tegra_xusb_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2201,7 +2198,6 @@ static int tegra_xusb_runtime_resume(struct device *dev)
 
        return err;
 }
-#endif
 
 static const struct dev_pm_ops tegra_xusb_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
index 93c38b557afd379eb16020d111e02c426f8c8d0e..541fe4dcc43a22d51704c145f9291dd91545b973 100644 (file)
@@ -3214,10 +3214,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                return;
 
        /* Bail out if toggle is already being cleared by a endpoint reset */
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
                ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
+       spin_unlock_irqrestore(&xhci->lock, flags);
        /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
        if (usb_endpoint_xfer_control(&host_ep->desc) ||
            usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3303,8 +3306,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        xhci_free_command(xhci, cfg_cmd);
 cleanup:
        xhci_free_command(xhci, stop_cmd);
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
                ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+       spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
index dca6181c33fdbb9661b455a9811caf4d40c21761..5a75fe5631238467b853ec53e7bfd01cdf5d5b88 100644 (file)
@@ -1899,6 +1899,7 @@ struct xhci_hcd {
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
 #define XHCI_BROKEN_D3COLD     BIT_ULL(41)
+#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index ce9fc46c92661cb259dc701ddcbe78e8c730efb0..b5935834f9d2414709d4b50bc8e9465fcb8e9f8b 100644 (file)
@@ -899,11 +899,13 @@ static int dsps_probe(struct platform_device *pdev)
        if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
                ret = dsps_setup_optional_vbus_irq(pdev, glue);
                if (ret)
-                       goto err;
+                       goto unregister_pdev;
        }
 
        return 0;
 
+unregister_pdev:
+       platform_device_unregister(glue->musb);
 err:
        pm_runtime_disable(&pdev->dev);
        iounmap(glue->usbss_base);
index 6cfb5d33609fb22c14ed6327bfb47a1584216701..a484ff5e4ebf83761964213eeb33d79f0ef28f6a 100644 (file)
@@ -246,11 +246,13 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_EG91                   0x0191
 #define QUECTEL_PRODUCT_EG95                   0x0195
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
+#define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
 
 #define CMOTECH_VENDOR_ID                      0x16d8
@@ -1111,6 +1113,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0xff, 0xff),
+         .driver_info = NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
@@ -1128,6 +1133,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
          .driver_info = ZLP },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
 
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -1227,6 +1233,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),    /* Telit LE910Cx (RNDIS) */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff),    /* Telit LE910Cx (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
index 83da8236e3c8bc5bfce602eaabea50169a44827e..c18bf8164bc2e9676247f36e672f2c9cea489e30 100644 (file)
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x907b)},   /* Sierra Wireless EM74xx */
        {DEVICE_SWI(0x1199, 0x9090)},   /* Sierra Wireless EM7565 QDL */
        {DEVICE_SWI(0x1199, 0x9091)},   /* Sierra Wireless EM7565 */
+       {DEVICE_SWI(0x1199, 0x90d2)},   /* Sierra Wireless EM9191 QDL */
        {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index 9858716698dfe71d1f74e6fc6fd3b02388e20a26..c15eec9cc460a01a96553c00c52dda166767b028 100644 (file)
@@ -696,7 +696,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
                tcpm_pd_receive(tcpci->port, &msg);
        }
 
-       if (status & TCPC_ALERT_EXTENDED_STATUS) {
+       if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
                ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
                if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
                        tcpm_vbus_change(tcpci->port);
index a4d37205df5491bde61c14294398c89081153741..7f2f3ff1b39112e72b683873c2a031fdb155c644 100644 (file)
@@ -4876,6 +4876,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
                        tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
                break;
        case SRC_ATTACHED:
+       case SRC_STARTUP:
        case SRC_SEND_CAPABILITIES:
        case SRC_READY:
                if (tcpm_port_is_disconnected(port) ||
index 21b3ae25c76d2c210396af4d9f6dc6eeabf25a33..ea4cc0a6e40cc2412195b009069b46a7a5ac7343 100644 (file)
@@ -625,10 +625,6 @@ static int tps6598x_probe(struct i2c_client *client)
        if (ret < 0)
                return ret;
 
-       fwnode = device_get_named_child_node(&client->dev, "connector");
-       if (!fwnode)
-               return -ENODEV;
-
        /*
         * This fwnode has a "compatible" property, but is never populated as a
         * struct device. Instead we simply parse it to read the properties.
@@ -636,7 +632,9 @@ static int tps6598x_probe(struct i2c_client *client)
         * with existing DT files, we work around this by deleting any
         * fwnode_links to/from this fwnode.
         */
-       fw_devlink_purge_absent_suppliers(fwnode);
+       fwnode = device_get_named_child_node(&client->dev, "connector");
+       if (fwnode)
+               fw_devlink_purge_absent_suppliers(fwnode);
 
        tps->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(tps->role_sw)) {
index 294ba05e6fc978fa06741c93db121126fa8cf992..bd56de7484dcb1d72d2b99cbeef7932adfb81888 100644 (file)
@@ -1714,6 +1714,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
 
+       if (!mvdev->actual_features)
+               return;
+
        if (!is_index_valid(mvdev, idx))
                return;
 
@@ -2145,6 +2148,8 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
 
        for (i = 0; i < ndev->mvdev.max_vqs; i++)
                ndev->vqs[i].ready = false;
+
+       ndev->mvdev.cvq.ready = false;
 }
 
 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
index 29a38ecba19e43ab9a54906e28bff49d960ec15f..841667a896dd020da0481b07395a23692fc1fedd 100644 (file)
@@ -80,6 +80,7 @@ struct vduse_dev {
        struct vdpa_callback config_cb;
        struct work_struct inject;
        spinlock_t irq_lock;
+       struct rw_semaphore rwsem;
        int minor;
        bool broken;
        bool connected;
@@ -410,6 +411,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
        if (domain->bounce_map)
                vduse_domain_reset_bounce_map(domain);
 
+       down_write(&dev->rwsem);
+
        dev->status = 0;
        dev->driver_features = 0;
        dev->generation++;
@@ -443,6 +446,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
                flush_work(&vq->inject);
                flush_work(&vq->kick);
        }
+
+       up_write(&dev->rwsem);
 }
 
 static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
@@ -665,13 +670,11 @@ static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
 static int vduse_vdpa_reset(struct vdpa_device *vdpa)
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
-
-       if (vduse_dev_set_status(dev, 0))
-               return -EIO;
+       int ret = vduse_dev_set_status(dev, 0);
 
        vduse_dev_reset(dev);
 
-       return 0;
+       return ret;
 }
 
 static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
@@ -887,6 +890,23 @@ static void vduse_vq_irq_inject(struct work_struct *work)
        spin_unlock_irq(&vq->irq_lock);
 }
 
+static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+                                   struct work_struct *irq_work)
+{
+       int ret = -EINVAL;
+
+       down_read(&dev->rwsem);
+       if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+               goto unlock;
+
+       ret = 0;
+       queue_work(vduse_irq_wq, irq_work);
+unlock:
+       up_read(&dev->rwsem);
+
+       return ret;
+}
+
 static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
 {
@@ -968,8 +988,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
                break;
        }
        case VDUSE_DEV_INJECT_CONFIG_IRQ:
-               ret = 0;
-               queue_work(vduse_irq_wq, &dev->inject);
+               ret = vduse_dev_queue_irq_work(dev, &dev->inject);
                break;
        case VDUSE_VQ_SETUP: {
                struct vduse_vq_config config;
@@ -1055,9 +1074,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
                if (index >= dev->vq_num)
                        break;
 
-               ret = 0;
                index = array_index_nospec(index, dev->vq_num);
-               queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+               ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
                break;
        }
        default:
@@ -1138,6 +1156,7 @@ static struct vduse_dev *vduse_dev_create(void)
        INIT_LIST_HEAD(&dev->send_list);
        INIT_LIST_HEAD(&dev->recv_list);
        spin_lock_init(&dev->irq_lock);
+       init_rwsem(&dev->rwsem);
 
        INIT_WORK(&dev->inject, vduse_dev_irq_inject);
        init_waitqueue_head(&dev->waitq);
@@ -1593,8 +1612,10 @@ static int vduse_init(void)
 
        vduse_irq_wq = alloc_workqueue("vduse-irq",
                                WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
-       if (!vduse_irq_wq)
+       if (!vduse_irq_wq) {
+               ret = -ENOMEM;
                goto err_wq;
+       }
 
        ret = vduse_domain_init();
        if (ret)
index 68198e0f2a63106bd345dc8e1ffb8d27bbe49555..a03b5a99c2dac7ec6e6d52838fb752802fc51fbd 100644 (file)
@@ -565,7 +565,7 @@ static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
 }
 
 struct vfio_pci_walk_info {
-       int (*fn)(struct pci_dev *, void *data);
+       int (*fn)(struct pci_dev *pdev, void *data);
        void *data;
        struct pci_dev *pdev;
        bool slot;
index f41d081777f58b0f8f71b5d4b2a88a1d3a9abb02..39039e0461175be42386471b3f7e2c204d0b7089 100644 (file)
@@ -173,6 +173,10 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
        if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
                return -EINVAL;
 
+       if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+               for (i = 0; i < nvqs; i++)
+                       vhost_vdpa_unsetup_vq_irq(v, i);
+
        if (status == 0) {
                ret = ops->reset(vdpa);
                if (ret)
@@ -184,10 +188,6 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
                for (i = 0; i < nvqs; i++)
                        vhost_vdpa_setup_vq_irq(v, i);
 
-       if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
-               for (i = 0; i < nvqs; i++)
-                       vhost_vdpa_unsetup_vq_irq(v, i);
-
        return 0;
 }
 
@@ -322,7 +322,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
        struct eventfd_ctx *ctx;
 
        cb.callback = vhost_vdpa_config_cb;
-       cb.private = v->vdpa;
+       cb.private = v;
        if (copy_from_user(&fd, argp, sizeof(fd)))
                return  -EFAULT;
 
@@ -640,7 +640,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
        u64 offset, map_size, map_iova = iova;
        struct vdpa_map_file *map_file;
        struct vm_area_struct *vma;
-       int ret;
+       int ret = 0;
 
        mmap_read_lock(dev->mm);
 
index b26b79dfcac92b20305140c4434eda091cbc74f8..6ed5e608dd04129fc7933789ecb7a74f3583cacc 100644 (file)
@@ -2193,8 +2193,9 @@ config FB_HYPERV
          This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
 config FB_SIMPLE
-       bool "Simple framebuffer support"
-       depends on (FB = y) && !DRM_SIMPLEDRM
+       tristate "Simple framebuffer support"
+       depends on FB
+       depends on !DRM_SIMPLEDRM
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index c5b99a4861e8709db4e96e7d683dc1731a945491..6b4d5a7f3e152b57cef93bb3f5f78966478d0741 100644 (file)
@@ -1267,7 +1267,7 @@ static struct platform_device *gbefb_device;
 static int __init gbefb_init(void)
 {
        int ret = platform_driver_register(&gbefb_driver);
-       if (!ret) {
+       if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
                gbefb_device = platform_device_alloc("gbefb", 0);
                if (gbefb_device) {
                        ret = platform_device_add(gbefb_device);
index 588e02fb91d378aebf222d9f7d39da4d0348e1f9..236081afe9a2a38566a4e8e25d9ff0331d032ec5 100644 (file)
@@ -239,6 +239,17 @@ static int virtio_dev_probe(struct device *_d)
                driver_features_legacy = driver_features;
        }
 
+       /*
+        * Some devices detect legacy solely via F_VERSION_1. Write
+        * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
+        * these when needed.
+        */
+       if (drv->validate && !virtio_legacy_is_little_endian()
+                         && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
+               dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
+               dev->config->finalize_features(dev);
+       }
+
        if (device_features & (1ULL << VIRTIO_F_VERSION_1))
                dev->features = driver_features & device_features;
        else
@@ -345,8 +356,13 @@ static int virtio_device_of_init(struct virtio_device *dev)
        ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
        BUG_ON(ret >= sizeof(compat));
 
+       /*
+        * On powerpc/pseries virtio devices are PCI devices so PCI
+        * vendor/device ids play the role of the "compatible" property.
+        * Simply don't init of_node in this case.
+        */
        if (!of_device_is_compatible(np, compat)) {
-               ret = -EINVAL;
+               ret = 0;
                goto out;
        }
 
index dd95dfd85e9808575aeb65a04b86a49332d5db14..3035bb6f545854fe7a3db1861b05b616de668c18 100644 (file)
@@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
        if (!indirect && vq->use_dma_api)
-               vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+               vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
                        ~VRING_DESC_F_NEXT;
 
        if (indirect) {
index b81fe4f7d4341b122fc5e706221f80f63e04a6af..bf59faeb3de1baea554ca0e9d4bd597bf86a673a 100644 (file)
@@ -1666,7 +1666,7 @@ config WDT_MTX1
 
 config SIBYTE_WDOG
        tristate "Sibyte SoC hardware watchdog"
-       depends on CPU_SB1 || (MIPS && COMPILE_TEST)
+       depends on CPU_SB1
        help
          Watchdog driver for the built in watchdog hardware in Sibyte
          SoC processors.  There are apparently two watchdog timers
index 643c6c2d0b7289c5f287b91b4abe0fab77000a82..ced2fc0deb8c4a32cbf005965325ea1a5914985d 100644 (file)
@@ -71,8 +71,6 @@
 #define TCOBASE(p)     ((p)->tco_res->start)
 /* SMI Control and Enable Register */
 #define SMI_EN(p)      ((p)->smi_res->start)
-#define TCO_EN         (1 << 13)
-#define GBL_SMI_EN     (1 << 0)
 
 #define TCO_RLD(p)     (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
 #define TCOv1_TMR(p)   (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
 
        tmrval = seconds_to_ticks(p, t);
 
-       /*
-        * If TCO SMIs are off, the timer counts down twice before rebooting.
-        * Otherwise, the BIOS generally reboots when the SMI triggers.
-        */
-       if (p->smi_res &&
-           (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+       /* For TCO v1 the timer counts down twice before rebooting */
+       if (p->iTCO_version == 1)
                tmrval /= 2;
 
        /* from the specs: */
@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
                 * Disables TCO logic generating an SMI#
                 */
                val32 = inl(SMI_EN(p));
-               val32 &= ~TCO_EN;       /* Turn off SMI clearing watchdog */
+               val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
                outl(val32, SMI_EN(p));
        }
 
index 2693ffb24ac7ebfbdace2d2b435e4146c24c75d0..31b03fa713412dd0b06e770efddb7d950c583e09 100644 (file)
@@ -119,7 +119,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
        iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
        if (!iwdt)
                return -ENOMEM;
-       iwdt->base = dev->platform_data;
+       iwdt->base = (void __iomem *)dev->platform_data;
 
        /*
         * Retrieve rate from a fixed clock from the device tree if
index 1616f93dfad7fcdccf7d74a7dc18d979a1643e7a..74d785b2b478f4719c78eae1425f826e7368b526 100644 (file)
@@ -268,8 +268,12 @@ static int omap_wdt_probe(struct platform_device *pdev)
                        wdev->wdog.bootstatus = WDIOF_CARDRESET;
        }
 
-       if (!early_enable)
+       if (early_enable) {
+               omap_wdt_start(&wdev->wdog);
+               set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+       } else {
                omap_wdt_disable(wdev);
+       }
 
        ret = watchdog_register_device(&wdev->wdog);
        if (ret) {
index ee9ff38929eb59b305a921fe98b8e1ef6b432b01..9791c74aebd489a3fee2efc388ace4adb24eda15 100644 (file)
@@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
        if (gwdt->version == 0)
                return readl(gwdt->control_base + SBSA_GWDT_WOR);
        else
-               return readq(gwdt->control_base + SBSA_GWDT_WOR);
+               return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
 }
 
 static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
@@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
        if (gwdt->version == 0)
                writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
        else
-               writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
+               lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
 }
 
 /*
@@ -411,4 +411,3 @@ MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
 MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
 MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
index 22f5aff0c1367127dfc73a5a81f1c19f8b7b2ecd..1b2c3aca6887c848678c822261af8662b100b506 100644 (file)
@@ -241,7 +241,7 @@ config XEN_PRIVCMD
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
-       depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+       depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
          This ACPI processor uploads Power Management information to the Xen
@@ -259,7 +259,7 @@ config XEN_ACPI_PROCESSOR
 
 config XEN_MCE_LOG
        bool "Xen platform mcelog"
-       depends on XEN_DOM0 && X86_MCE
+       depends on XEN_PV_DOM0 && X86_MCE
        help
          Allow kernel fetching MCE error from Xen platform and
          converting it into Linux mcelog format for mcelog tools
index 43ebfe36ac2767358f8471db489d825e51ae96a8..3a50f097ed3edb9ea9d8c712cc84fc2493032457 100644 (file)
@@ -491,12 +491,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * Stop waiting if either state is not BP_EAGAIN and ballooning action is
- * needed, or if the credit has changed while state is BP_EAGAIN.
+ * Stop waiting if either state is BP_DONE and ballooning action is
+ * needed, or if the credit has changed while state is not BP_DONE.
  */
 static bool balloon_thread_cond(enum bp_state state, long credit)
 {
-       if (state != BP_EAGAIN)
+       if (state == BP_DONE)
                credit = 0;
 
        return current_credit() != credit || kthread_should_stop();
@@ -516,10 +516,19 @@ static int balloon_thread(void *unused)
 
        set_freezable();
        for (;;) {
-               if (state == BP_EAGAIN)
-                       timeout = balloon_stats.schedule_delay * HZ;
-               else
+               switch (state) {
+               case BP_DONE:
+               case BP_ECANCELED:
                        timeout = 3600 * HZ;
+                       break;
+               case BP_EAGAIN:
+                       timeout = balloon_stats.schedule_delay * HZ;
+                       break;
+               case BP_WAIT:
+                       timeout = HZ;
+                       break;
+               }
+
                credit = current_credit();
 
                wait_event_freezable_timeout(balloon_thread_wq,
index 720a7b7abd46d690f2ed86f544064403ab4f1647..3369734108af23724e728531ce4024ea752b7fbc 100644 (file)
@@ -257,7 +257,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;
 
-       /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+       /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;
 
@@ -420,7 +420,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        int rc;
        struct page **pages;
 
-       pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+       pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;
 
@@ -428,7 +428,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
-               kfree(pages);
+               kvfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
@@ -803,21 +803,21 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
-               int num;
+               int num, *errs = (int *)pfns;
 
+               BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
-                                                pfns, kdata.num, (int *)pfns,
+                                                pfns, kdata.num, errs,
                                                 vma->vm_page_prot,
-                                                domid,
-                                                vma->vm_private_data);
+                                                domid);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        unsigned int i;
 
                        for (i = 0; i < num; i++) {
-                               rc = pfns[i];
+                               rc = errs[i];
                                if (rc < 0)
                                        break;
                        }
@@ -912,7 +912,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
-       kfree(pages);
+       kvfree(pages);
 }
 
 static vm_fault_t privcmd_fault(struct vm_fault *vmf)
index eb2151fb60494c9330b29dcd136a577f9fdf2286..1769a44f4819275130de594a45731a9c0af9f6c4 100644 (file)
@@ -23,7 +23,7 @@ struct fscache_netfs v9fs_cache_netfs = {
        .version        = 0,
 };
 
-/**
+/*
  * v9fs_random_cachetag - Generate a random tag to be associated
  *                       with a new cache session.
  *
@@ -233,7 +233,7 @@ static void v9fs_vfs_readpage_complete(struct page *page, void *data,
        unlock_page(page);
 }
 
-/**
+/*
  * __v9fs_readpage_from_fscache - read a page from cache
  *
  * Returns 0 if the pages are in cache and a BIO is submitted,
@@ -268,7 +268,7 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
        }
 }
 
-/**
+/*
  * __v9fs_readpages_from_fscache - read multiple pages from cache
  *
  * Returns 0 if the pages are in cache and a BIO is submitted,
@@ -308,7 +308,7 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
        }
 }
 
-/**
+/*
  * __v9fs_readpage_to_fscache - write a page to the cache
  *
  */
index 9d9de62592be22e3a331eafc3556a190708fc698..b8863dd0de5cca569da550414f6c6fb446c6dda8 100644 (file)
 #include "v9fs_vfs.h"
 #include "fid.h"
 
+static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
+{
+       hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
+}
+
+
 /**
  * v9fs_fid_add - add a fid to a dentry
  * @dentry: dentry that the fid is being added to
  * @fid: fid to add
  *
  */
-
-static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
-{
-       hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
-}
-
 void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
 {
        spin_lock(&dentry->d_lock);
@@ -67,7 +67,7 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
 
 /**
  * v9fs_open_fid_add - add an open fid to an inode
- * @dentry: inode that the fid is being added to
+ * @inode: inode that the fid is being added to
  * @fid: fid to add
  *
  */
index cdb99507ef33d6e2c6e358d28ce2859260684506..2e0fa7c932db0e8552233e69268678bd42a52089 100644 (file)
@@ -155,6 +155,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
 /**
  * v9fs_parse_options - parse mount options into session structure
  * @v9ses: existing v9fs session information
+ * @opts: The mount option string
  *
  * Return 0 upon success, -ERRNO upon failure.
  */
@@ -542,12 +543,9 @@ extern int v9fs_error_init(void);
 static struct kobject *v9fs_kobj;
 
 #ifdef CONFIG_9P_FSCACHE
-/**
- * caches_show - list caches associated with a session
- *
- * Returns the size of buffer written.
+/*
+ * List caches associated with a session
  */
-
 static ssize_t caches_show(struct kobject *kobj,
                           struct kobj_attribute *attr,
                           char *buf)
index cce9ace651a2dbce718bf0d344cd6eae095fffd5..1c4f1b39cc9505e4d29ed0ce3fac52be8036217f 100644 (file)
@@ -30,8 +30,7 @@
 
 /**
  * v9fs_fid_readpage - read an entire page in from 9P
- *
- * @fid: fid being read
+ * @data: Opaque pointer to the fid being read
  * @page: structure to page
  *
  */
@@ -116,6 +115,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
 
 /**
  * v9fs_release_page - release the private state associated with a page
+ * @page: The page to be released
+ * @gfp: The caller's allocation restrictions
  *
  * Returns 1 if the page can be released, false otherwise.
  */
@@ -129,9 +130,9 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 
 /**
  * v9fs_invalidate_page - Invalidate a page completely or partially
- *
- * @page: structure to page
- * @offset: offset in the page
+ * @page: The page to be invalidated
+ * @offset: offset of the invalidated region
+ * @length: length of the invalidated region
  */
 
 static void v9fs_invalidate_page(struct page *page, unsigned int offset,
@@ -199,6 +200,8 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 
 /**
  * v9fs_launder_page - Writeback a dirty page
+ * @page: The page to be cleaned up
+ *
  * Returns 0 on success.
  */
 
@@ -219,6 +222,7 @@ static int v9fs_launder_page(struct page *page)
 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @iocb: target I/O control block
+ * @iter: The data/buffer to use
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
index aab5e653866040bc337781f05937190fd1426100..246235ebdb70a4b1f4b5bf6afb60db056e4426a5 100644 (file)
@@ -359,14 +359,11 @@ out_err:
 }
 
 /**
- * v9fs_file_read - read from a file
- * @filp: file pointer to read
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
  *
  */
-
 static ssize_t
 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -388,11 +385,9 @@ v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /**
- * v9fs_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
  *
  */
 static ssize_t
@@ -561,11 +556,9 @@ out_unlock:
 }
 
 /**
- * v9fs_mmap_file_read - read from a file
- * @filp: file pointer to read
- * @data: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_mmap_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
  *
  */
 static ssize_t
@@ -576,11 +569,9 @@ v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /**
- * v9fs_mmap_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_mmap_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
  *
  */
 static ssize_t
index 795706520b5e716783148c029248d84cd9a6eef6..08f48b70a741456c978ee2475d8a94d69560eb1f 100644 (file)
@@ -218,7 +218,7 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
 
 /**
  * v9fs_alloc_inode - helper function to allocate an inode
- *
+ * @sb: The superblock to allocate the inode from
  */
 struct inode *v9fs_alloc_inode(struct super_block *sb)
 {
@@ -238,7 +238,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
 
 /**
  * v9fs_free_inode - destroy an inode
- *
+ * @inode: The inode to be freed
  */
 
 void v9fs_free_inode(struct inode *inode)
@@ -343,7 +343,7 @@ error:
  * v9fs_get_inode - helper function to setup an inode
  * @sb: superblock
  * @mode: mode to setup inode with
- *
+ * @rdev: The device numbers to set
  */
 
 struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
@@ -369,7 +369,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
 }
 
 /**
- * v9fs_clear_inode - release an inode
+ * v9fs_evict_inode - Remove an inode from the inode cache
  * @inode: inode to release
  *
  */
@@ -665,14 +665,15 @@ error:
 
 /**
  * v9fs_vfs_create - VFS hook to create a regular file
+ * @mnt_userns: The user namespace of the mount
+ * @dir: The parent directory
+ * @dentry: The name of file to be created
+ * @mode: The UNIX file mode to set
+ * @excl: True if the file must not yet exist
  *
  * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open().  This is only called
  * for mknod(2).
  *
- * @dir: directory inode that is being created
- * @dentry:  dentry that is being deleted
- * @mode: create permissions
- *
  */
 
 static int
@@ -696,6 +697,7 @@ v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
 
 /**
  * v9fs_vfs_mkdir - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
  * @dir:  inode that is being unlinked
  * @dentry: dentry that is being unlinked
  * @mode: mode for new directory
@@ -900,10 +902,12 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 
 /**
  * v9fs_vfs_rename - VFS hook to rename an inode
+ * @mnt_userns: The user namespace of the mount
  * @old_dir:  old dir inode
  * @old_dentry: old dentry
  * @new_dir: new dir inode
  * @new_dentry: new dentry
+ * @flags: RENAME_* flags
  *
  */
 
@@ -1009,6 +1013,7 @@ done:
 
 /**
  * v9fs_vfs_getattr - retrieve file metadata
+ * @mnt_userns: The user namespace of the mount
  * @path: Object to query
  * @stat: metadata structure to populate
  * @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -1050,6 +1055,7 @@ v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
 
 /**
  * v9fs_vfs_setattr - set file metadata
+ * @mnt_userns: The user namespace of the mount
  * @dentry: file whose metadata to set
  * @iattr: metadata assignment structure
  *
@@ -1285,6 +1291,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
 
 /**
  * v9fs_vfs_symlink - helper function to create symlinks
+ * @mnt_userns: The user namespace of the mount
  * @dir: directory inode containing symlink
  * @dentry: dentry for symlink
  * @symname: symlink data
@@ -1340,6 +1347,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
 
 /**
  * v9fs_vfs_mknod - create a special file
+ * @mnt_userns: The user namespace of the mount
  * @dir: inode destination for new link
  * @dentry: dentry for file
  * @mode: mode for creation
index e1c0240b51c0350c99605eb502f109f2949337f3..01b9e1281a297968e13cdab4349f7df11fd6e263 100644 (file)
@@ -37,7 +37,10 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
                    struct dentry *dentry, umode_t omode, dev_t rdev);
 
 /**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object
+ * @dir_inode: The directory inode
+ *
+ * Helper function to get the gid for creating a
  * new file system object. This checks the S_ISGID to determine the owning
  * group of the new file system object.
  */
@@ -211,12 +214,13 @@ int v9fs_open_to_dotl_flags(int flags)
 
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @mnt_userns: The user namespace of the mount
  * @dir: directory inode that is being created
  * @dentry:  dentry that is being deleted
  * @omode: create permissions
+ * @excl: True if the file must not yet exist
  *
  */
-
 static int
 v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,
                     struct dentry *dentry, umode_t omode, bool excl)
@@ -361,6 +365,7 @@ err_clunk_old_fid:
 
 /**
  * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
  * @dir:  inode that is being unlinked
  * @dentry: dentry that is being unlinked
  * @omode: mode for new directory
@@ -537,6 +542,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
 
 /**
  * v9fs_vfs_setattr_dotl - set file metadata
+ * @mnt_userns: The user namespace of the mount
  * @dentry: file whose metadata to set
  * @iattr: metadata assignment structure
  *
@@ -816,6 +822,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
 
 /**
  * v9fs_vfs_mknod_dotl - create a special file
+ * @mnt_userns: The user namespace of the mount
  * @dir: inode destination for new link
  * @dentry: dentry for file
  * @omode: mode for creation
index dae9a57d7ec0c11b105ffa7fc26c3da40d9a7158..45cfd50a95210b95375f1df9a44db34aabc3bb0c 100644 (file)
@@ -86,8 +86,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
        return afs_do_sync_operation(op);
 }
 
-/**
- * afs_sillyrename - Perform a silly-rename of a dentry
+/*
+ * Perform silly-rename of a dentry.
  *
  * AFS is stateless and the server doesn't know when the client is holding a
  * file open.  To prevent application problems when a file is unlinked while
index 2dfe3b3a53d69ab08ead53808f0ab95c5a063873..f24370f5c7744a953549614fa8d966df0018ed28 100644 (file)
@@ -974,8 +974,7 @@ int afs_launder_page(struct page *page)
                iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
 
                trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
-               ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
-                                    true);
+               ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
        }
 
        trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
index 16b5fca0626e6388427a428438f04adb75989296..54c1f8b8b07576b4d59f0b53ec4f017a5c9fd50f 100644 (file)
@@ -358,7 +358,7 @@ int autofs_wait(struct autofs_sb_info *sbi,
                qstr.len = strlen(p);
                offset = p - name;
        }
-       qstr.hash = full_name_hash(dentry, name, qstr.len);
+       qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);
 
        if (mutex_lock_interruptible(&sbi->wq_mutex)) {
                kfree(name);
index 69d900a8473d4e39080635697796e9ec340b7b5e..a813b70f594e69ee043781a61de130a636ef9c6a 100644 (file)
@@ -630,7 +630,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
-                               elf_type |= MAP_FIXED_NOREPLACE;
+                               elf_type |= MAP_FIXED;
                        else if (no_base && interp_elf_ex->e_type == ET_DYN)
                                load_addr = -vaddr;
 
index dff2c8a3e059f92379b50da3909f831eef20a51a..c0cebcf745cefdbe94ac83f3605acc380ef18a5c 100644 (file)
@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod);
 struct btrfs_dir_item *
 btrfs_search_dir_index_item(struct btrfs_root *root,
index f1274d5c3805eb6a8871ad2c51980977132c5b1e..7721ce0c060483256d1d529449753d152b7ffe1d 100644 (file)
@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
 }
 
 /*
- * lookup a directory item based on name.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory item by name.
+ *
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @name:      The name associated to the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exists, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
  */
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@ -273,27 +284,42 @@ out:
 }
 
 /*
- * lookup a directory item based on index.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory index item by name and index number.
  *
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @index:     The index number.
+ * @name:      The name associated to the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir index item does not exists, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
  */
 struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod)
 {
+       struct btrfs_dir_item *di;
        struct btrfs_key key;
 
        key.objectid = dir;
        key.type = BTRFS_DIR_INDEX_KEY;
-       key.offset = objectid;
+       key.offset = index;
 
-       return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       if (di == ERR_PTR(-ENOENT))
+               return NULL;
+
+       return di;
 }
 
 struct btrfs_dir_item *
index fc3da7585fb786be71f0308541a4d21b4b96305f..0ab456cb4bf801fc9f6f89623100ea182964b8f4 100644 (file)
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 out_free_delayed:
        btrfs_free_delayed_extent_op(extent_op);
 out_free_buf:
+       btrfs_tree_unlock(buf);
        free_extent_buffer(buf);
 out_free_reserved:
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
index 7ff577005d0febbf6be14c21a2d1c81f3e600c50..a1762363f61faff8248c9fc4e7cda9503c775da5 100644 (file)
@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
        if (args->start >= inode->disk_i_size && !args->replace_extent)
                modify_tree = 0;
 
-       update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
-                      root == fs_info->tree_root);
+       update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
                                                 drop_args.bytes_found);
                if (ret != -ENOSPC) {
                        /*
-                        * When cloning we want to avoid transaction aborts when
-                        * nothing was done and we are attempting to clone parts
-                        * of inline extents, in such cases -EOPNOTSUPP is
-                        * returned by __btrfs_drop_extents() without having
-                        * changed anything in the file.
+                        * The only time we don't want to abort is if we are
+                        * attempting to clone a partial inline extent, in which
+                        * case we'll get EOPNOTSUPP.  However if we aren't
+                        * clone we need to abort no matter what, because if we
+                        * got EOPNOTSUPP via prealloc then we messed up and
+                        * need to abort.
                         */
-                       if (extent_info && !extent_info->is_new_extent &&
-                           ret && ret != -EOPNOTSUPP)
+                       if (ret &&
+                           (ret != -EOPNOTSUPP ||
+                            (extent_info && extent_info->is_new_extent)))
                                btrfs_abort_transaction(trans, ret);
                        break;
                }
index f7efc26aa82a114e13e1816e40f881e840790108..b415c5ec03ea0642c4053c07fe857a2540f10a09 100644 (file)
@@ -939,9 +939,11 @@ out:
 }
 
 /*
- * helper function to see if a given name and sequence number found
- * in an inode back reference are already in a directory and correctly
- * point to this inode
+ * See if a given name and sequence number found in an inode back reference are
+ * already in a directory and correctly point to this inode.
+ *
+ * Returns: < 0 on error, 0 if the directory entry does not exists and 1 if it
+ * exists.
  */
 static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
 {
        struct btrfs_dir_item *di;
        struct btrfs_key location;
-       int match = 0;
+       int ret = 0;
 
        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
+               goto out;
+       } else if (di) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
-       } else
+       } else {
                goto out;
-       btrfs_release_path(path);
+       }
 
+       btrfs_release_path(path);
        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
-               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
-               if (location.objectid != objectid)
-                       goto out;
-       } else
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
                goto out;
-       match = 1;
+       } else if (di) {
+               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+               if (location.objectid == objectid)
+                       ret = 1;
+       }
 out:
        btrfs_release_path(path);
-       return match;
+       return ret;
 }
 
 /*
@@ -1182,7 +1189,9 @@ next:
        /* look for a conflicting sequence number */
        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
                                         ref_index, name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1192,7 +1201,9 @@ next:
        /* look for a conflicting name */
        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
                                   name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
-               /* if we already have a perfect match, we're done */
-               if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
-                                       btrfs_ino(BTRFS_I(inode)), ref_index,
-                                       name, namelen)) {
+               ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+                                  btrfs_ino(BTRFS_I(inode)), ref_index,
+                                  name, namelen);
+               if (ret < 0) {
+                       goto out;
+               } else if (ret == 0) {
                        /*
                         * look for a conflicting back reference in the
                         * metadata. if we find one we have to unlink that name
@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
                }
+               /* Else, ret == 1, we already have a perfect match, we're done. */
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
                kfree(name);
@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        struct btrfs_key log_key;
        struct inode *dir;
        u8 log_type;
-       int exists;
-       int ret = 0;
+       bool exists;
+       int ret;
        bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
        bool name_added = false;
 
@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                   name_len);
 
        btrfs_dir_item_key_to_cpu(eb, di, &log_key);
-       exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
-       if (exists == 0)
-               exists = 1;
-       else
-               exists = 0;
+       ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
        btrfs_release_path(path);
+       if (ret < 0)
+               goto out;
+       exists = (ret == 0);
+       ret = 0;
 
        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                ret = -EINVAL;
                goto out;
        }
-       if (IS_ERR_OR_NULL(dst_di)) {
+
+       if (IS_ERR(dst_di)) {
+               ret = PTR_ERR(dst_di);
+               goto out;
+       } else if (!dst_di) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
@@ -2281,7 +2299,7 @@ again:
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
-               if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+               if (!log_di) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
                        btrfs_release_path(path);
                        btrfs_release_path(log_path);
@@ -3540,8 +3558,7 @@ out_unlock:
        if (err == -ENOSPC) {
                btrfs_set_log_full_commit(trans);
                err = 0;
-       } else if (err < 0 && err != -ENOENT) {
-               /* ENOENT can be returned if the entry hasn't been fsynced yet */
+       } else if (err < 0) {
                btrfs_abort_transaction(trans, err);
        }
 
index 3e42d0466521fee94b01cf3273e4354257db6db4..8f537f1d9d1d3417e8dd627c50fc957592b65342 100644 (file)
@@ -2330,7 +2330,6 @@ retry:
 
 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 flush_tid;
@@ -2365,14 +2364,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        if (err < 0)
                ret = err;
 
-       if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
-               spin_lock(&file->f_lock);
-               err = errseq_check_and_advance(&ci->i_meta_err,
-                                              &fi->meta_err);
-               spin_unlock(&file->f_lock);
-               if (err < 0)
-                       ret = err;
-       }
+       err = file_check_and_advance_wb_err(file);
+       if (err < 0)
+               ret = err;
 out:
        dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
        return ret;
index d16fd2d5fd426205c8d3678430b677781bdea6dd..e61018d9764ee221ec5ee396f45ec01277211c8d 100644 (file)
@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
-       fi->meta_err = errseq_sample(&ci->i_meta_err);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
 
        return 0;
index 2df1e1284451e66999ac34465e1e7617a13d5fd9..1c7574105478fb9ca167b6ab1842fdfaf6daca63 100644 (file)
@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        ceph_fscache_inode_init(ci);
 
-       ci->i_meta_err = 0;
-
        return &ci->vfs_inode;
 }
 
index 7cad180d6debc5a52be66ba082965201a6490e61..d64413adc0fd24d335564a72e2e9cdfb38062bee 100644 (file)
@@ -1493,7 +1493,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
 {
        struct ceph_mds_request *req;
        struct rb_node *p;
-       struct ceph_inode_info *ci;
 
        dout("cleanup_session_requests mds%d\n", session->s_mds);
        mutex_lock(&mdsc->mutex);
@@ -1502,16 +1501,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
                                       struct ceph_mds_request, r_unsafe_item);
                pr_warn_ratelimited(" dropping unsafe request %llu\n",
                                    req->r_tid);
-               if (req->r_target_inode) {
-                       /* dropping unsafe change of inode's attributes */
-                       ci = ceph_inode(req->r_target_inode);
-                       errseq_set(&ci->i_meta_err, -EIO);
-               }
-               if (req->r_unsafe_dir) {
-                       /* dropping unsafe directory operation */
-                       ci = ceph_inode(req->r_unsafe_dir);
-                       errseq_set(&ci->i_meta_err, -EIO);
-               }
+               if (req->r_target_inode)
+                       mapping_set_error(req->r_target_inode->i_mapping, -EIO);
+               if (req->r_unsafe_dir)
+                       mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
                __unregister_request(mdsc, req);
        }
        /* zero r_attempts, so kick_requests() will re-send requests */
@@ -1678,7 +1671,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                spin_unlock(&mdsc->cap_dirty_lock);
 
                if (dirty_dropped) {
-                       errseq_set(&ci->i_meta_err, -EIO);
+                       mapping_set_error(inode->i_mapping, -EIO);
 
                        if (ci->i_wrbuffer_ref_head == 0 &&
                            ci->i_wr_ref == 0 &&
index 9b1b7f4cfdd4b10442412e8cfb5cc4ecfb6130c6..fd8742bae84715ceb13cbabec5be2e5d06c43eb2 100644 (file)
@@ -1002,16 +1002,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
        struct ceph_fs_client *new = fc->s_fs_info;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
-       struct ceph_fs_client *other = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 
        dout("ceph_compare_super %p\n", sb);
 
-       if (compare_mount_options(fsopt, opt, other)) {
+       if (compare_mount_options(fsopt, opt, fsc)) {
                dout("monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
-           ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+           ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
                dout("fsid doesn't match\n");
                return 0;
        }
@@ -1019,6 +1019,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
                dout("flags differ\n");
                return 0;
        }
+
+       if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
+               dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+               return 0;
+       }
+
+       if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
+               dout("client has been forcibly unmounted\n");
+               return 0;
+       }
+
        return 1;
 }
 
index a40eb14c282af3604223fa47dcf99e0531e52b11..14f951cd5b61bf8042342fdd483b9d923f54c27e 100644 (file)
@@ -429,8 +429,6 @@ struct ceph_inode_info {
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
 #endif
-       errseq_t i_meta_err;
-
        struct inode vfs_inode; /* at end */
 };
 
@@ -774,7 +772,6 @@ struct ceph_file_info {
        spinlock_t rw_contexts_lock;
        struct list_head rw_contexts;
 
-       errseq_t meta_err;
        u32 filp_gen;
        atomic_t num_locks;
 };
index 8129a430d789d92865a5ad71c0d5d9af43a87509..2f117c57160dc01c8f1cd09620eabf31ad84de45 100644 (file)
@@ -528,7 +528,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
 {
        struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
 
-       if (de)
+       if (!IS_ERR(de))
                d_inode(de)->i_size = file_size;
 }
 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
index ffb295aa891c03e00e938046363b6d7c46f0b4d5..74b172a4adda3a60a80c6a3c6300b57f0e7b37f4 100644 (file)
@@ -551,7 +551,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
        struct dir_private_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        struct fname *fname;
-       int     ret;
+       int ret = 0;
 
        if (!info) {
                info = ext4_htree_create_dir_info(file, ctx->pos);
@@ -599,7 +599,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
                                                   info->curr_minor_hash,
                                                   &info->next_hash);
                        if (ret < 0)
-                               return ret;
+                               goto finished;
                        if (ret == 0) {
                                ctx->pos = ext4_get_htree_eof(file);
                                break;
@@ -630,7 +630,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
        }
 finished:
        info->last_pos = ctx->pos;
-       return 0;
+       return ret < 0 ? ret : 0;
 }
 
 static int ext4_release_dir(struct inode *inode, struct file *filp)
index 90ff5acaf11ffc442c0f0d4db545d287e51b272d..3825195539d7438a2db2b112b75e3af50b4ccccd 100644 (file)
@@ -3593,9 +3593,6 @@ extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
                                           unsigned flags,
                                           struct page **pagep,
                                           void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-                                        unsigned len, unsigned copied,
-                                        struct page *page);
 extern int ext4_try_add_inline_entry(handle_t *handle,
                                     struct ext4_filename *fname,
                                     struct inode *dir, struct inode *inode);
index c0de30f25185060a083aef4199086fea38819375..0e02571f2f828052111f36b066024ff7d98573a6 100644 (file)
@@ -5916,7 +5916,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
 }
 
 /* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
 {
        int ret;
        struct ext4_map_blocks map;
@@ -5925,9 +5925,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
        map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
 
        ret = ext4_map_blocks(NULL, inode, &map, 0);
+       if (ret < 0)
+               return ret;
        if (ret != 0)
-               return;
+               return 0;
        *cur = *cur + map.m_len;
+       return 0;
 }
 
 /* Count number of blocks used by this inode and update i_blocks */
@@ -5976,7 +5979,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
         * iblocks by total number of differences found.
         */
        cur = 0;
-       skip_hole(inode, &cur);
+       ret = skip_hole(inode, &cur);
+       if (ret < 0)
+               goto out;
        path = ext4_find_extent(inode, cur, NULL, 0);
        if (IS_ERR(path))
                goto out;
@@ -5995,8 +6000,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
                }
                cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
                                        ext4_ext_get_actual_len(ex));
-               skip_hole(inode, &cur);
-
+               ret = skip_hole(inode, &cur);
+               if (ret < 0) {
+                       ext4_ext_drop_refs(path);
+                       kfree(path);
+                       break;
+               }
                path2 = ext4_find_extent(inode, cur, NULL, 0);
                if (IS_ERR(path2)) {
                        ext4_ext_drop_refs(path);
index 8e610a381862f8db5029858f45dcdb5277b76622..8ea5a81e655489e48a1adbcdc27b7f653a88b3b4 100644 (file)
@@ -892,6 +892,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
                                            sizeof(lrange), (u8 *)&lrange, crc))
                                return -ENOSPC;
                } else {
+                       unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+                               EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+                       /* Limit the number of blocks in one extent */
+                       map.m_len = min(max, map.m_len);
+
                        fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
                        ex = (struct ext4_extent *)&fc_ext.fc_ex;
                        ex->ee_block = cpu_to_le32(map.m_lblk);
index 82bf4ff6be28e2850ebc310b657cae1397f13e37..39a1ab129fdc94b2caa18a7febe8a5b0f8ffa7b4 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/iomap.h>
 #include <linux/fiemap.h>
 #include <linux/iversion.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -733,45 +734,83 @@ convert:
 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
                               unsigned copied, struct page *page)
 {
-       int ret, no_expand;
+       handle_t *handle = ext4_journal_current_handle();
+       int no_expand;
        void *kaddr;
        struct ext4_iloc iloc;
+       int ret = 0, ret2;
+
+       if (unlikely(copied < len) && !PageUptodate(page))
+               copied = 0;
 
-       if (unlikely(copied < len)) {
-               if (!PageUptodate(page)) {
-                       copied = 0;
+       if (likely(copied)) {
+               ret = ext4_get_inode_loc(inode, &iloc);
+               if (ret) {
+                       unlock_page(page);
+                       put_page(page);
+                       ext4_std_error(inode->i_sb, ret);
                        goto out;
                }
-       }
+               ext4_write_lock_xattr(inode, &no_expand);
+               BUG_ON(!ext4_has_inline_data(inode));
 
-       ret = ext4_get_inode_loc(inode, &iloc);
-       if (ret) {
-               ext4_std_error(inode->i_sb, ret);
-               copied = 0;
-               goto out;
-       }
+               /*
+                * ei->i_inline_off may have changed since
+                * ext4_write_begin() called
+                * ext4_try_to_write_inline_data()
+                */
+               (void) ext4_find_inline_data_nolock(inode);
 
-       ext4_write_lock_xattr(inode, &no_expand);
-       BUG_ON(!ext4_has_inline_data(inode));
+               kaddr = kmap_atomic(page);
+               ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+               kunmap_atomic(kaddr);
+               SetPageUptodate(page);
+               /* clear page dirty so that writepages wouldn't work for us. */
+               ClearPageDirty(page);
 
-       /*
-        * ei->i_inline_off may have changed since ext4_write_begin()
-        * called ext4_try_to_write_inline_data()
-        */
-       (void) ext4_find_inline_data_nolock(inode);
+               ext4_write_unlock_xattr(inode, &no_expand);
+               brelse(iloc.bh);
 
-       kaddr = kmap_atomic(page);
-       ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
-       kunmap_atomic(kaddr);
-       SetPageUptodate(page);
-       /* clear page dirty so that writepages wouldn't work for us. */
-       ClearPageDirty(page);
+               /*
+                * It's important to update i_size while still holding page
+                * lock: page writeout could otherwise come in and zero
+                * beyond i_size.
+                */
+               ext4_update_inode_size(inode, pos + copied);
+       }
+       unlock_page(page);
+       put_page(page);
 
-       ext4_write_unlock_xattr(inode, &no_expand);
-       brelse(iloc.bh);
-       mark_inode_dirty(inode);
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (likely(copied))
+               mark_inode_dirty(inode);
 out:
-       return copied;
+       /*
+        * If we didn't copy as much data as expected, we need to trim back
+        * size of xattr containing inline data.
+        */
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               ext4_orphan_add(handle, inode);
+
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+       if (pos + len > inode->i_size) {
+               ext4_truncate_failed_write(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+       return ret ? ret : copied;
 }
 
 struct buffer_head *
@@ -953,43 +992,6 @@ out:
        return ret;
 }
 
-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-                                 unsigned len, unsigned copied,
-                                 struct page *page)
-{
-       int ret;
-
-       ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
-       if (ret < 0) {
-               unlock_page(page);
-               put_page(page);
-               return ret;
-       }
-       copied = ret;
-
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        *
-        * But it's important to update i_size while still holding page lock:
-        * page writeout could otherwise come in and zero beyond i_size.
-        */
-       if (pos+copied > inode->i_size)
-               i_size_write(inode, pos+copied);
-       unlock_page(page);
-       put_page(page);
-
-       /*
-        * Don't mark the inode dirty under page lock. First, it unnecessarily
-        * makes the holding time of page lock longer. Second, it forces lock
-        * ordering of page lock and transaction start for journaling
-        * filesystems.
-        */
-       mark_inode_dirty(inode);
-
-       return copied;
-}
-
 #ifdef INLINE_DIR_DEBUG
 void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
                          void *inline_start, int inline_size)
@@ -1917,6 +1919,24 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
        EXT4_I(inode)->i_disksize = i_size;
 
        if (i_size < inline_size) {
+               /*
+                * if there's inline data to truncate and this file was
+                * converted to extents after that inline data was written,
+                * the extent status cache must be cleared to avoid leaving
+                * behind stale delayed allocated extent entries
+                */
+               if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+retry:
+                       err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+                       if (err == -ENOMEM) {
+                               cond_resched();
+                               congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               goto retry;
+                       }
+                       if (err)
+                               goto out_error;
+               }
+
                /* Clear the content in the xattr space. */
                if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
                        if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
index d18852d6029c194ae29e55a84353c11d5cb92365..0f06305167d5a301dd1ef4f707e9653fc6e75fbd 100644 (file)
@@ -1284,22 +1284,14 @@ static int ext4_write_end(struct file *file,
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
-       int inline_data = ext4_has_inline_data(inode);
        bool verity = ext4_verity_in_progress(inode);
 
        trace_ext4_write_end(inode, pos, len, copied);
-       if (inline_data) {
-               ret = ext4_write_inline_data_end(inode, pos, len,
-                                                copied, page);
-               if (ret < 0) {
-                       unlock_page(page);
-                       put_page(page);
-                       goto errout;
-               }
-               copied = ret;
-       } else
-               copied = block_write_end(file, mapping, pos,
-                                        len, copied, page, fsdata);
+
+       if (ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
        /*
         * it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
@@ -1320,7 +1312,7 @@ static int ext4_write_end(struct file *file,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed || inline_data)
+       if (i_size_changed)
                ret = ext4_mark_inode_dirty(handle, inode);
 
        if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
@@ -1329,7 +1321,7 @@ static int ext4_write_end(struct file *file,
                 * inode->i_size. So truncate them
                 */
                ext4_orphan_add(handle, inode);
-errout:
+
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
@@ -1395,7 +1387,6 @@ static int ext4_journalled_write_end(struct file *file,
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
-       int inline_data = ext4_has_inline_data(inode);
        bool verity = ext4_verity_in_progress(inode);
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
@@ -1404,16 +1395,10 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (inline_data) {
-               ret = ext4_write_inline_data_end(inode, pos, len,
-                                                copied, page);
-               if (ret < 0) {
-                       unlock_page(page);
-                       put_page(page);
-                       goto errout;
-               }
-               copied = ret;
-       } else if (unlikely(copied < len) && !PageUptodate(page)) {
+       if (ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+       if (unlikely(copied < len) && !PageUptodate(page)) {
                copied = 0;
                ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
        } else {
@@ -1436,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (old_size < pos && !verity)
                pagecache_isize_extended(inode, old_size, pos);
 
-       if (size_changed || inline_data) {
+       if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
@@ -1449,7 +1434,6 @@ static int ext4_journalled_write_end(struct file *file,
                 */
                ext4_orphan_add(handle, inode);
 
-errout:
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
@@ -1644,6 +1628,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int ret;
        bool allocated = false;
+       bool reserved = false;
 
        /*
         * If the cluster containing lblk is shared with a delayed,
@@ -1660,6 +1645,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
                ret = ext4_da_reserve_space(inode);
                if (ret != 0)   /* ENOSPC */
                        goto errout;
+               reserved = true;
        } else {   /* bigalloc */
                if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
                        if (!ext4_es_scan_clu(inode,
@@ -1672,6 +1658,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
                                        ret = ext4_da_reserve_space(inode);
                                        if (ret != 0)   /* ENOSPC */
                                                goto errout;
+                                       reserved = true;
                                } else {
                                        allocated = true;
                                }
@@ -1682,6 +1669,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
        }
 
        ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+       if (ret && reserved)
+               ext4_da_release_space(inode, 1);
 
 errout:
        return ret;
@@ -1722,13 +1711,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                }
 
                /*
-                * Delayed extent could be allocated by fallocate.
-                * So we need to check it.
+                * the buffer head associated with a delayed and not unwritten
+                * block found in the extent status cache must contain an
+                * invalid block number and have its BH_New and BH_Delay bits
+                * set, reflecting the state assigned when the block was
+                * initially delayed allocated
                 */
-               if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
-                       map_bh(bh, inode->i_sb, invalid_block);
-                       set_buffer_new(bh);
-                       set_buffer_delay(bh);
+               if (ext4_es_is_delonly(&es)) {
+                       BUG_ON(bh->b_blocknr != invalid_block);
+                       BUG_ON(!buffer_new(bh));
+                       BUG_ON(!buffer_delay(bh));
                        return 0;
                }
 
@@ -2932,19 +2924,6 @@ static int ext4_nonda_switch(struct super_block *sb)
        return 0;
 }
 
-/* We always reserve for an inode update; the superblock could be there too */
-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
-{
-       if (likely(ext4_has_feature_large_file(inode->i_sb)))
-               return 1;
-
-       if (pos + len <= 0x7fffffffULL)
-               return 1;
-
-       /* We might need to update the superblock to set LARGE_FILE */
-       return 2;
-}
-
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
@@ -2953,7 +2932,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        struct page *page;
        pgoff_t index;
        struct inode *inode = mapping->host;
-       handle_t *handle;
 
        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;
@@ -2979,41 +2957,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                        return 0;
        }
 
-       /*
-        * grab_cache_page_write_begin() can take a long time if the
-        * system is thrashing due to memory pressure, or if the page
-        * is being written back.  So grab it first before we start
-        * the transaction handle.  This also allows us to allocate
-        * the page (if needed) without using GFP_NOFS.
-        */
-retry_grab:
+retry:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
-       unlock_page(page);
 
-       /*
-        * With delayed allocation, we don't log the i_disksize update
-        * if there is delayed block allocation. But we still need
-        * to journalling the i_disksize update if writes to the end
-        * of file which has an already mapped buffer.
-        */
-retry_journal:
-       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
-                               ext4_da_write_credits(inode, pos, len));
-       if (IS_ERR(handle)) {
-               put_page(page);
-               return PTR_ERR(handle);
-       }
-
-       lock_page(page);
-       if (page->mapping != mapping) {
-               /* The page got truncated from under us */
-               unlock_page(page);
-               put_page(page);
-               ext4_journal_stop(handle);
-               goto retry_grab;
-       }
        /* In case writeback began while the page was unlocked */
        wait_for_stable_page(page);
 
@@ -3025,20 +2973,18 @@ retry_journal:
 #endif
        if (ret < 0) {
                unlock_page(page);
-               ext4_journal_stop(handle);
+               put_page(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
-                * i_size_read because we hold i_mutex.
+                * i_size_read because we hold inode lock.
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
 
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
-                       goto retry_journal;
-
-               put_page(page);
+                       goto retry;
                return ret;
        }
 
@@ -3075,8 +3021,6 @@ static int ext4_da_write_end(struct file *file,
                             struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
-       int ret = 0, ret2;
-       handle_t *handle = ext4_journal_current_handle();
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
@@ -3086,44 +3030,36 @@ static int ext4_da_write_end(struct file *file,
                                      len, copied, page, fsdata);
 
        trace_ext4_da_write_end(inode, pos, len, copied);
+
+       if (write_mode != CONVERT_INLINE_DATA &&
+           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+           ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
        start = pos & (PAGE_SIZE - 1);
        end = start + copied - 1;
 
        /*
-        * generic_write_end() will run mark_inode_dirty() if i_size
-        * changes.  So let's piggyback the i_disksize mark_inode_dirty
-        * into that.
+        * Since we are holding inode lock, we are sure i_disksize <=
+        * i_size. We also know that if i_disksize < i_size, there are
+        * delalloc writes pending in the range upto i_size. If the end of
+        * the current write is <= i_size, there's no need to touch
+        * i_disksize since writeback will push i_disksize upto i_size
+        * eventually. If the end of the current write is > i_size and
+        * inside an allocated block (ext4_da_should_update_i_disksize()
+        * check), we need to update i_disksize here as neither
+        * ext4_writepage() nor certain ext4_writepages() paths not
+        * allocating blocks update i_disksize.
+        *
+        * Note that we defer inode dirtying to generic_write_end() /
+        * ext4_da_write_inline_data_end().
         */
        new_i_size = pos + copied;
-       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
-               if (ext4_has_inline_data(inode) ||
-                   ext4_da_should_update_i_disksize(page, end)) {
-                       ext4_update_i_disksize(inode, new_i_size);
-                       /* We need to mark inode dirty even if
-                        * new_i_size is less that inode->i_size
-                        * bu greater than i_disksize.(hint delalloc)
-                        */
-                       ret = ext4_mark_inode_dirty(handle, inode);
-               }
-       }
+       if (copied && new_i_size > inode->i_size &&
+           ext4_da_should_update_i_disksize(page, end))
+               ext4_update_i_disksize(inode, new_i_size);
 
-       if (write_mode != CONVERT_INLINE_DATA &&
-           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
-           ext4_has_inline_data(inode))
-               ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
-                                                    page);
-       else
-               ret2 = generic_write_end(file, mapping, pos, len, copied,
-                                                       page, fsdata);
-
-       copied = ret2;
-       if (ret2 < 0)
-               ret = ret2;
-       ret2 = ext4_journal_stop(handle);
-       if (unlikely(ret2 && !ret))
-               ret = ret2;
-
-       return ret ? ret : copied;
+       return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 
 /*
@@ -4340,6 +4276,12 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
                goto has_buffer;
 
        lock_buffer(bh);
+       if (ext4_buffer_uptodate(bh)) {
+               /* Someone brought it uptodate while we waited */
+               unlock_buffer(bh);
+               goto has_buffer;
+       }
+
        /*
         * If we have all information of the inode in memory and this
         * is the only valid inode in the block, we need not read the
index 0775950ee84e311980d29fb8fca95385cb8e10ff..88d5d274a86843293005590709eb5e77d958a179 100644 (file)
@@ -658,7 +658,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
                 * constraints, it may not be safe to do it right here so we
                 * defer superblock flushing to a workqueue.
                 */
-               if (continue_fs)
+               if (continue_fs && journal)
                        schedule_work(&EXT4_SB(sb)->s_error_work);
                else
                        ext4_commit_super(sb);
@@ -1350,6 +1350,12 @@ static void ext4_destroy_inode(struct inode *inode)
                                true);
                dump_stack();
        }
+
+       if (EXT4_I(inode)->i_reserved_data_blocks)
+               ext4_msg(inode->i_sb, KERN_ERR,
+                        "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+                        inode->i_ino, EXT4_I(inode),
+                        EXT4_I(inode)->i_reserved_data_blocks);
 }
 
 static void init_once(void *foo)
@@ -3021,17 +3027,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
  */
 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 {
-       loff_t res = EXT4_NDIR_BLOCKS;
+       unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
        int meta_blocks;
-       loff_t upper_limit;
-       /* This is calculated to be the largest file size for a dense, block
+
+       /*
+        * This is calculated to be the largest file size for a dense, block
         * mapped file such that the file's total number of 512-byte sectors,
         * including data and all indirect blocks, does not exceed (2^48 - 1).
         *
         * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
         * number of 512-byte sectors of the file.
         */
-
        if (!has_huge_files) {
                /*
                 * !has_huge_files or implies that the inode i_block field
@@ -3074,7 +3080,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
        if (res > MAX_LFS_FILESIZE)
                res = MAX_LFS_FILESIZE;
 
-       return res;
+       return (loff_t)res;
 }
 
 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
@@ -5042,12 +5048,15 @@ failed_mount_wq:
        sbi->s_ea_block_cache = NULL;
 
        if (sbi->s_journal) {
+               /* flush s_error_work before journal destroy. */
+               flush_work(&sbi->s_error_work);
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
 failed_mount3a:
        ext4_es_unregister_shrinker(sbi);
 failed_mount3:
+       /* flush s_error_work before sbi destroy */
        flush_work(&sbi->s_error_work);
        del_timer_sync(&sbi->s_err_report);
        ext4_stop_mmpd(sbi);
index f346a78f4bd67a1b6243aae4da7e1f22bb78bc7b..6a675652129b2193235c234dd4d09ab2a095e93a 100644 (file)
@@ -77,7 +77,6 @@ static WORK_STATE(INIT_OBJECT,                "INIT", fscache_initialise_object);
 static WORK_STATE(PARENT_READY,                "PRDY", fscache_parent_ready);
 static WORK_STATE(ABORT_INIT,          "ABRT", fscache_abort_initialisation);
 static WORK_STATE(LOOK_UP_OBJECT,      "LOOK", fscache_look_up_object);
-static WORK_STATE(CREATE_OBJECT,       "CRTO", fscache_look_up_object);
 static WORK_STATE(OBJECT_AVAILABLE,    "AVBL", fscache_object_available);
 static WORK_STATE(JUMPSTART_DEPS,      "JUMP", fscache_jumpstart_dependents);
 
@@ -907,6 +906,7 @@ static void fscache_dequeue_object(struct fscache_object *object)
  * @object: The object to ask about
  * @data: The auxiliary data for the object
  * @datalen: The size of the auxiliary data
+ * @object_size: The size of the object according to the server.
  *
  * This function consults the netfs about the coherency state of an object.
  * The caller must be holding a ref on cookie->n_active (held by
index 4338771077008fd947e9f9e4b368f263bba42895..e002cdfaf3cc77d91c21a197a62e6eafa0e4f265 100644 (file)
@@ -22,7 +22,10 @@ static void fscache_operation_dummy_cancel(struct fscache_operation *op)
 
 /**
  * fscache_operation_init - Do basic initialisation of an operation
+ * @cookie: The cookie to operate on
  * @op: The operation to initialise
+ * @processor: The function to perform the operation
+ * @cancel: A function to handle operation cancellation
  * @release: The release function to assign
  *
  * Do basic initialisation of an operation.  The caller must still set flags,
index 319596df5dc602ab529cc14a09c02597fd7fd0f0..f55f9f94b1a4fbf2c0fbdc67a3a80a6739dc48f3 100644 (file)
@@ -1121,6 +1121,9 @@ int fuse_init_fs_context_submount(struct fs_context *fsc);
  */
 void fuse_conn_destroy(struct fuse_mount *fm);
 
+/* Drop the connection and free the fuse mount */
+void fuse_mount_destroy(struct fuse_mount *fm);
+
 /**
  * Add connection to control filesystem
  */
index 36cd03114b6d9f8b23956ae70966b9c0aeff2d0a..12d49a1914e84801fddf6d9a833409c7e94e9256 100644 (file)
@@ -457,14 +457,6 @@ static void fuse_send_destroy(struct fuse_mount *fm)
        }
 }
 
-static void fuse_put_super(struct super_block *sb)
-{
-       struct fuse_mount *fm = get_fuse_mount_super(sb);
-
-       fuse_conn_put(fm->fc);
-       kfree(fm);
-}
-
 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
 {
        stbuf->f_type    = FUSE_SUPER_MAGIC;
@@ -1003,7 +995,6 @@ static const struct super_operations fuse_super_operations = {
        .evict_inode    = fuse_evict_inode,
        .write_inode    = fuse_write_inode,
        .drop_inode     = generic_delete_inode,
-       .put_super      = fuse_put_super,
        .umount_begin   = fuse_umount_begin,
        .statfs         = fuse_statfs,
        .sync_fs        = fuse_sync_fs,
@@ -1424,20 +1415,17 @@ static int fuse_get_tree_submount(struct fs_context *fsc)
        if (!fm)
                return -ENOMEM;
 
+       fm->fc = fuse_conn_get(fc);
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, NULL, set_anon_super_fc);
-       if (IS_ERR(sb)) {
-               kfree(fm);
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
+       if (IS_ERR(sb))
                return PTR_ERR(sb);
-       }
-       fm->fc = fuse_conn_get(fc);
 
        /* Initialize superblock, making @mp_fi its root */
        err = fuse_fill_super_submount(sb, mp_fi);
        if (err) {
-               fuse_conn_put(fc);
-               kfree(fm);
-               sb->s_fs_info = NULL;
                deactivate_locked_super(sb);
                return err;
        }
@@ -1569,8 +1557,6 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
 {
        struct fuse_fs_context *ctx = fsc->fs_private;
        int err;
-       struct fuse_conn *fc;
-       struct fuse_mount *fm;
 
        if (!ctx->file || !ctx->rootmode_present ||
            !ctx->user_id_present || !ctx->group_id_present)
@@ -1580,42 +1566,18 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
         * Require mount to happen from the same user namespace which
         * opened /dev/fuse to prevent potential attacks.
         */
-       err = -EINVAL;
        if ((ctx->file->f_op != &fuse_dev_operations) ||
            (ctx->file->f_cred->user_ns != sb->s_user_ns))
-               goto err;
+               return -EINVAL;
        ctx->fudptr = &ctx->file->private_data;
 
-       fc = kmalloc(sizeof(*fc), GFP_KERNEL);
-       err = -ENOMEM;
-       if (!fc)
-               goto err;
-
-       fm = kzalloc(sizeof(*fm), GFP_KERNEL);
-       if (!fm) {
-               kfree(fc);
-               goto err;
-       }
-
-       fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
-       fc->release = fuse_free_conn;
-
-       sb->s_fs_info = fm;
-
        err = fuse_fill_super_common(sb, ctx);
        if (err)
-               goto err_put_conn;
+               return err;
        /* file->private_data shall be visible on all CPUs after this */
        smp_mb();
        fuse_send_init(get_fuse_mount_super(sb));
        return 0;
-
- err_put_conn:
-       fuse_conn_put(fc);
-       kfree(fm);
-       sb->s_fs_info = NULL;
- err:
-       return err;
 }
 
 /*
@@ -1637,22 +1599,40 @@ static int fuse_get_tree(struct fs_context *fsc)
 {
        struct fuse_fs_context *ctx = fsc->fs_private;
        struct fuse_dev *fud;
+       struct fuse_conn *fc;
+       struct fuse_mount *fm;
        struct super_block *sb;
        int err;
 
+       fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+       if (!fc)
+               return -ENOMEM;
+
+       fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+       if (!fm) {
+               kfree(fc);
+               return -ENOMEM;
+       }
+
+       fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
+       fc->release = fuse_free_conn;
+
+       fsc->s_fs_info = fm;
+
        if (ctx->fd_present)
                ctx->file = fget(ctx->fd);
 
        if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
                err = get_tree_bdev(fsc, fuse_fill_super);
-               goto out_fput;
+               goto out;
        }
        /*
         * While block dev mount can be initialized with a dummy device fd
         * (found by device name), normal fuse mounts can't
         */
+       err = -EINVAL;
        if (!ctx->file)
-               return -EINVAL;
+               goto out;
 
        /*
         * Allow creating a fuse mount with an already initialized fuse
@@ -1668,7 +1648,9 @@ static int fuse_get_tree(struct fs_context *fsc)
        } else {
                err = get_tree_nodev(fsc, fuse_fill_super);
        }
-out_fput:
+out:
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
        if (ctx->file)
                fput(ctx->file);
        return err;
@@ -1747,17 +1729,25 @@ static void fuse_sb_destroy(struct super_block *sb)
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;
 
-       if (fm) {
+       if (sb->s_root) {
                last = fuse_mount_remove(fm);
                if (last)
                        fuse_conn_destroy(fm);
        }
 }
 
+void fuse_mount_destroy(struct fuse_mount *fm)
+{
+       fuse_conn_put(fm->fc);
+       kfree(fm);
+}
+EXPORT_SYMBOL(fuse_mount_destroy);
+
 static void fuse_kill_sb_anon(struct super_block *sb)
 {
        fuse_sb_destroy(sb);
        kill_anon_super(sb);
+       fuse_mount_destroy(get_fuse_mount_super(sb));
 }
 
 static struct file_system_type fuse_fs_type = {
@@ -1775,6 +1765,7 @@ static void fuse_kill_sb_blk(struct super_block *sb)
 {
        fuse_sb_destroy(sb);
        kill_block_super(sb);
+       fuse_mount_destroy(get_fuse_mount_super(sb));
 }
 
 static struct file_system_type fuseblk_fs_type = {
index 0ad89c6629d74167836246a77c49609cead850af..94fc874f5de7f833390c16405d2671ea52a52da6 100644 (file)
@@ -1394,12 +1394,13 @@ static void virtio_kill_sb(struct super_block *sb)
        bool last;
 
        /* If mount failed, we can still be called without any fc */
-       if (fm) {
+       if (sb->s_root) {
                last = fuse_mount_remove(fm);
                if (last)
                        virtio_fs_conn_destroy(fm);
        }
        kill_anon_super(sb);
+       fuse_mount_destroy(fm);
 }
 
 static int virtio_fs_test_super(struct super_block *sb,
@@ -1455,19 +1456,14 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
 
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
-       if (fsc->s_fs_info) {
-               fuse_conn_put(fc);
-               kfree(fm);
-       }
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
        if (IS_ERR(sb))
                return PTR_ERR(sb);
 
        if (!sb->s_root) {
                err = virtio_fs_fill_super(sb, fsc);
                if (err) {
-                       fuse_conn_put(fc);
-                       kfree(fm);
-                       sb->s_fs_info = NULL;
                        deactivate_locked_super(sb);
                        return err;
                }
index c2360cdc403dd42bbe395b41e4b7de3c9f876cef..422a7ed6a9bdb31e7f79bd7aa2a3cca16fe586ed 100644 (file)
@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers == acct->max_workers) {
+       if (acct->nr_workers >= acct->max_workers) {
                raw_spin_unlock(&wqe->lock);
                return true;
        }
@@ -584,10 +584,7 @@ loop:
 
                        if (!get_signal(&ksig))
                                continue;
-                       if (fatal_signal_pending(current) ||
-                           signal_group_exit(current->signal))
-                               break;
-                       continue;
+                       break;
                }
                last_timeout = !ret;
        }
@@ -1294,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 
        rcu_read_lock();
        for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
                struct io_wqe_acct *acct;
 
+               raw_spin_lock(&wqe->lock);
                for (i = 0; i < IO_WQ_ACCT_NR; i++) {
-                       acct = &wq->wqes[node]->acct[i];
+                       acct = &wqe->acct[i];
                        prev = max_t(int, acct->max_workers, prev);
                        if (new_count[i])
                                acct->max_workers = new_count[i];
                        new_count[i] = prev;
                }
+               raw_spin_unlock(&wqe->lock);
        }
        rcu_read_unlock();
        return 0;
index 82f867983bb3279a323a2245220e109e9df01928..bc18af5e0a9345b747c2b8c62d9425c172579acb 100644 (file)
@@ -403,7 +403,6 @@ struct io_ring_ctx {
                struct wait_queue_head  cq_wait;
                unsigned                cq_extra;
                atomic_t                cq_timeouts;
-               struct fasync_struct    *cq_fasync;
                unsigned                cq_last_tm_flush;
        } ____cacheline_aligned_in_smp;
 
@@ -457,6 +456,8 @@ struct io_ring_ctx {
                struct work_struct              exit_work;
                struct list_head                tctx_list;
                struct completion               ref_comp;
+               u32                             iowq_limits[2];
+               bool                            iowq_limits_set;
        };
 };
 
@@ -1369,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
        }
 }
 
-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
-       req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
        if (WARN_ON_ONCE(!req->link))
@@ -1614,10 +1610,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
-       if (waitqueue_active(&ctx->poll_wait)) {
+       if (waitqueue_active(&ctx->poll_wait))
                wake_up_interruptible(&ctx->poll_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1631,10 +1625,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
        }
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
-       if (waitqueue_active(&ctx->poll_wait)) {
+       if (waitqueue_active(&ctx->poll_wait))
                wake_up_interruptible(&ctx->poll_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -2954,7 +2946,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                        struct io_ring_ctx *ctx = req->ctx;
 
                        req_set_fail(req);
-                       if (issue_flags & IO_URING_F_NONBLOCK) {
+                       if (!(issue_flags & IO_URING_F_NONBLOCK)) {
                                mutex_lock(&ctx->uring_lock);
                                __io_req_complete(req, issue_flags, ret, cflags);
                                mutex_unlock(&ctx->uring_lock);
@@ -6988,7 +6980,7 @@ issue_sqe:
                switch (io_arm_poll_handler(req)) {
                case IO_APOLL_READY:
                        if (linked_timeout)
-                               io_unprep_linked_timeout(req);
+                               io_queue_linked_timeout(linked_timeout);
                        goto issue_sqe;
                case IO_APOLL_ABORTED:
                        /*
@@ -9345,13 +9337,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
-       struct io_ring_ctx *ctx = file->private_data;
-
-       return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 {
        const struct cred *creds;
@@ -9650,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
                ret = io_uring_alloc_task_context(current, ctx);
                if (unlikely(ret))
                        return ret;
+
                tctx = current->io_uring;
+               if (ctx->iowq_limits_set) {
+                       unsigned int limits[2] = { ctx->iowq_limits[0],
+                                                  ctx->iowq_limits[1], };
+
+                       ret = io_wq_max_workers(tctx->io_wq, limits);
+                       if (ret)
+                               return ret;
+               }
        }
        if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
                node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10145,7 +10139,6 @@ static const struct file_operations io_uring_fops = {
        .mmap_capabilities = io_uring_nommu_mmap_capabilities,
 #endif
        .poll           = io_uring_poll,
-       .fasync         = io_uring_fasync,
 #ifdef CONFIG_PROC_FS
        .show_fdinfo    = io_uring_show_fdinfo,
 #endif
@@ -10656,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 
 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                        void __user *arg)
+       __must_hold(&ctx->uring_lock)
 {
+       struct io_tctx_node *node;
        struct io_uring_task *tctx = NULL;
        struct io_sq_data *sqd = NULL;
        __u32 new_count[2];
@@ -10687,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                tctx = current->io_uring;
        }
 
-       ret = -EINVAL;
-       if (!tctx || !tctx->io_wq)
-               goto err;
+       BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-       ret = io_wq_max_workers(tctx->io_wq, new_count);
-       if (ret)
-               goto err;
+       memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+       ctx->iowq_limits_set = true;
+
+       ret = -EINVAL;
+       if (tctx && tctx->io_wq) {
+               ret = io_wq_max_workers(tctx->io_wq, new_count);
+               if (ret)
+                       goto err;
+       } else {
+               memset(new_count, 0, sizeof(new_count));
+       }
 
        if (sqd) {
                mutex_unlock(&sqd->lock);
@@ -10703,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
+       /* that's it for SQPOLL, only the SQPOLL task creates requests */
+       if (sqd)
+               return 0;
+
+       /* now propagate the restriction to all registered users */
+       list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+               struct io_uring_task *tctx = node->task->io_uring;
+
+               if (WARN_ON_ONCE(!tctx->io_wq))
+                       continue;
+
+               for (i = 0; i < ARRAY_SIZE(new_count); i++)
+                       new_count[i] = ctx->iowq_limits[i];
+               /* ignore errors, it always returns zero anyway */
+               (void)io_wq_max_workers(tctx->io_wq, new_count);
+       }
        return 0;
 err:
        if (sqd) {
index 87aac4c72c37da2f82cad443279d66f0b5a2cf8c..1b07550485b96415f0a79a7029b10bf5d78c643e 100644 (file)
@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
        struct fd f = fdget(fd);
        int ret = -EBADF;
 
-       if (!f.file)
+       if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;
 
        ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
index ba581429bf7b28b6e83e30429e253faa81c27a21..8e0a1378a4b1fe1a19aa4d100661639392fe1557 100644 (file)
@@ -1111,13 +1111,25 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
 
        kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
        /* attach dentry and inode */
-       if (kn && kernfs_active(kn)) {
+       if (kn) {
+               /* Inactive nodes are invisible to the VFS so don't
+                * create a negative.
+                */
+               if (!kernfs_active(kn)) {
+                       up_read(&kernfs_rwsem);
+                       return NULL;
+               }
                inode = kernfs_get_inode(dir->i_sb, kn);
                if (!inode)
                        inode = ERR_PTR(-ENOMEM);
        }
-       /* Needed only for negative dentry validation */
-       if (!inode)
+       /*
+        * Needed for negative dentry validation.
+        * The negative dentry can be created in kernfs_iop_lookup()
+        * or transforms from positive dentry in dentry_unlink_inode()
+        * called from vfs_rmdir().
+        */
+       if (!IS_ERR(inode))
                kernfs_set_rev(parent, dentry);
        up_read(&kernfs_rwsem);
 
index de36f12070bf03d304c59b916cfdb2de14d18765..30a92ddc181747e3becf8a419220eab003fffb04 100644 (file)
@@ -68,125 +68,6 @@ void ksmbd_copy_gss_neg_header(void *buf)
        memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
 }
 
-static void
-str_to_key(unsigned char *str, unsigned char *key)
-{
-       int i;
-
-       key[0] = str[0] >> 1;
-       key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
-       key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
-       key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
-       key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
-       key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
-       key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
-       key[7] = str[6] & 0x7F;
-       for (i = 0; i < 8; i++)
-               key[i] = (key[i] << 1);
-}
-
-static int
-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
-{
-       unsigned char key2[8];
-       struct des_ctx ctx;
-
-       if (fips_enabled) {
-               ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
-               return -ENOENT;
-       }
-
-       str_to_key(key, key2);
-       des_expand_key(&ctx, key2, DES_KEY_SIZE);
-       des_encrypt(&ctx, out, in);
-       memzero_explicit(&ctx, sizeof(ctx));
-       return 0;
-}
-
-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
-{
-       int rc;
-
-       rc = smbhash(p24, c8, p21);
-       if (rc)
-               return rc;
-       rc = smbhash(p24 + 8, c8, p21 + 7);
-       if (rc)
-               return rc;
-       return smbhash(p24 + 16, c8, p21 + 14);
-}
-
-/* produce a md4 message digest from data of length n bytes */
-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
-                        int link_len)
-{
-       int rc;
-       struct ksmbd_crypto_ctx *ctx;
-
-       ctx = ksmbd_crypto_ctx_find_md4();
-       if (!ctx) {
-               ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
-               return -ENOMEM;
-       }
-
-       rc = crypto_shash_init(CRYPTO_MD4(ctx));
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not init md4 shash\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with link_str\n");
-               goto out;
-       }
-
-       rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
-       if (rc)
-               ksmbd_debug(AUTH, "Could not generate md4 hash\n");
-out:
-       ksmbd_release_crypto_ctx(ctx);
-       return rc;
-}
-
-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
-                                    char *server_challenge, int len)
-{
-       int rc;
-       struct ksmbd_crypto_ctx *ctx;
-
-       ctx = ksmbd_crypto_ctx_find_md5();
-       if (!ctx) {
-               ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
-               return -ENOMEM;
-       }
-
-       rc = crypto_shash_init(CRYPTO_MD5(ctx));
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not init md5 shash\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with challenge\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with nonce\n");
-               goto out;
-       }
-
-       rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
-       if (rc)
-               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
-out:
-       ksmbd_release_crypto_ctx(ctx);
-       return rc;
-}
-
 /**
  * ksmbd_gen_sess_key() - function to generate session key
  * @sess:      session of connection
@@ -324,43 +205,6 @@ out:
        return ret;
 }
 
-/**
- * ksmbd_auth_ntlm() - NTLM authentication handler
- * @sess:      session of connection
- * @pw_buf:    NTLM challenge response
- * @passkey:   user password
- *
- * Return:     0 on success, error number on error
- */
-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)
-{
-       int rc;
-       unsigned char p21[21];
-       char key[CIFS_AUTH_RESP_SIZE];
-
-       memset(p21, '\0', 21);
-       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
-       rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);
-       if (rc) {
-               pr_err("password processing failed\n");
-               return rc;
-       }
-
-       ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
-                     CIFS_SMB1_SESSKEY_SIZE);
-       memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
-              CIFS_AUTH_RESP_SIZE);
-       sess->sequence_number = 1;
-
-       if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
-               ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
-               return -EINVAL;
-       }
-
-       ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
-       return 0;
-}
-
 /**
  * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
  * @sess:      session of connection
@@ -441,44 +285,6 @@ out:
        return rc;
 }
 
-/**
- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
- * @sess:      session of connection
- * @client_nonce:      client nonce from LM response.
- * @ntlm_resp:         ntlm response data from client.
- *
- * Return:     0 on success, error number on error
- */
-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,
-                              char *ntlm_resp)
-{
-       char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
-       int rc;
-       unsigned char p21[21];
-       char key[CIFS_AUTH_RESP_SIZE];
-
-       rc = ksmbd_enc_update_sess_key(sess_key,
-                                      client_nonce,
-                                      (char *)sess->ntlmssp.cryptkey, 8);
-       if (rc) {
-               pr_err("password processing failed\n");
-               goto out;
-       }
-
-       memset(p21, '\0', 21);
-       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
-       rc = ksmbd_enc_p24(p21, sess_key, key);
-       if (rc) {
-               pr_err("password processing failed\n");
-               goto out;
-       }
-
-       if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
-               rc = -EINVAL;
-out:
-       return rc;
-}
-
 /**
  * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
  * authenticate blob
@@ -492,8 +298,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
                                   int blob_len, struct ksmbd_session *sess)
 {
        char *domain_name;
-       unsigned int lm_off, nt_off;
-       unsigned short nt_len;
+       unsigned int nt_off, dn_off;
+       unsigned short nt_len, dn_len;
        int ret;
 
        if (blob_len < sizeof(struct authenticate_message)) {
@@ -508,26 +314,17 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
                return -EINVAL;
        }
 
-       lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
        nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
        nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
+       dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
+       dn_len = le16_to_cpu(authblob->DomainName.Length);
 
-       /* process NTLM authentication */
-       if (nt_len == CIFS_AUTH_RESP_SIZE) {
-               if (le32_to_cpu(authblob->NegotiateFlags) &
-                   NTLMSSP_NEGOTIATE_EXTENDED_SEC)
-                       return __ksmbd_auth_ntlmv2(sess, (char *)authblob +
-                               lm_off, (char *)authblob + nt_off);
-               else
-                       return ksmbd_auth_ntlm(sess, (char *)authblob +
-                               nt_off);
-       }
+       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+               return -EINVAL;
 
        /* TODO : use domain name that imported from configuration file */
-       domain_name = smb_strndup_from_utf16((const char *)authblob +
-                       le32_to_cpu(authblob->DomainName.BufferOffset),
-                       le16_to_cpu(authblob->DomainName.Length), true,
-                       sess->conn->local_nls);
+       domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
+                                            dn_len, true, sess->conn->local_nls);
        if (IS_ERR(domain_name))
                return PTR_ERR(domain_name);
 
index af086d35398aea60fb7e795df94807689f721a33..b57a0d8a392ff5c8a388e042be4811edcd0a7d9f 100644 (file)
@@ -61,6 +61,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
                conn->local_nls = load_nls_default();
        atomic_set(&conn->req_running, 0);
        atomic_set(&conn->r_count, 0);
+       conn->total_credits = 1;
+
        init_waitqueue_head(&conn->req_running_q);
        INIT_LIST_HEAD(&conn->conns_list);
        INIT_LIST_HEAD(&conn->sessions);
@@ -296,10 +298,12 @@ int ksmbd_conn_handler_loop(void *p)
                pdu_size = get_rfc1002_len(hdr_buf);
                ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
 
-               /* make sure we have enough to get to SMB header end */
-               if (!ksmbd_pdu_size_has_room(pdu_size)) {
-                       ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
-                                   pdu_size);
+               /*
+                * Check if pdu size is valid (min : smb header size,
+                * max : 0x00FFFFFF).
+                */
+               if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+                   pdu_size > MAX_STREAM_PROT_LEN) {
                        continue;
                }
 
index 5f4b1008d17e0bc51888797e3ea9516d74e0e4c3..81488d04199da8da6a83d638a44a431fdfa629f6 100644 (file)
@@ -81,12 +81,6 @@ static struct shash_desc *alloc_shash_desc(int id)
        case CRYPTO_SHASH_SHA512:
                tfm = crypto_alloc_shash("sha512", 0, 0);
                break;
-       case CRYPTO_SHASH_MD4:
-               tfm = crypto_alloc_shash("md4", 0, 0);
-               break;
-       case CRYPTO_SHASH_MD5:
-               tfm = crypto_alloc_shash("md5", 0, 0);
-               break;
        default:
                return NULL;
        }
@@ -214,16 +208,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
        return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
 }
 
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
-{
-       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
-}
-
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
-{
-       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
-}
-
 static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
 {
        struct ksmbd_crypto_ctx *ctx;
index ef11154b43df3652335f8c73ee504f95df0250a1..4a367c62f6536662979a3cb4caf8ad2a253fcb9a 100644 (file)
@@ -15,8 +15,6 @@ enum {
        CRYPTO_SHASH_CMACAES,
        CRYPTO_SHASH_SHA256,
        CRYPTO_SHASH_SHA512,
-       CRYPTO_SHASH_MD4,
-       CRYPTO_SHASH_MD5,
        CRYPTO_SHASH_MAX,
 };
 
@@ -43,8 +41,6 @@ struct ksmbd_crypto_ctx {
 #define CRYPTO_CMACAES(c)      ((c)->desc[CRYPTO_SHASH_CMACAES])
 #define CRYPTO_SHA256(c)       ((c)->desc[CRYPTO_SHASH_SHA256])
 #define CRYPTO_SHA512(c)       ((c)->desc[CRYPTO_SHASH_SHA512])
-#define CRYPTO_MD4(c)          ((c)->desc[CRYPTO_SHASH_MD4])
-#define CRYPTO_MD5(c)          ((c)->desc[CRYPTO_SHASH_MD5])
 
 #define CRYPTO_HMACMD5_TFM(c)  ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
 #define CRYPTO_HMACSHA256_TFM(c)\
@@ -52,8 +48,6 @@ struct ksmbd_crypto_ctx {
 #define CRYPTO_CMACAES_TFM(c)  ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
 #define CRYPTO_SHA256_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
 #define CRYPTO_SHA512_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
-#define CRYPTO_MD4_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD4]->tfm)
-#define CRYPTO_MD5_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD5]->tfm)
 
 #define CRYPTO_GCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
 #define CRYPTO_CCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -64,8 +58,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
 void ksmbd_crypto_destroy(void);
index 49a5a3afa118162e70c49fe4e3048483d0c97d23..5b8f3e0ebdb36aef86f79c1ca475e646fa300f87 100644 (file)
@@ -12,7 +12,7 @@
 #include "unicode.h"
 #include "vfs_cache.h"
 
-#define KSMBD_VERSION  "3.1.9"
+#define KSMBD_VERSION  "3.4.2"
 
 extern int ksmbd_debug_types;
 
index 2fbe2bc1e093f5b8721fe20074da3b17dac97add..c6718a05d347fa4c9a864efd266142fbbddd1c2f 100644 (file)
@@ -211,6 +211,7 @@ struct ksmbd_tree_disconnect_request {
  */
 struct ksmbd_logout_request {
        __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+       __u32   account_flags;
 };
 
 /*
@@ -317,6 +318,7 @@ enum KSMBD_TREE_CONN_STATUS {
 #define KSMBD_USER_FLAG_BAD_UID                BIT(2)
 #define KSMBD_USER_FLAG_BAD_USER       BIT(3)
 #define KSMBD_USER_FLAG_GUEST_ACCOUNT  BIT(4)
+#define KSMBD_USER_FLAG_DELAY_SESSION  BIT(5)
 
 /*
  * Share config flags.
index d21629ae5c89f79a3e993055646a467352917b6c..1019d3677d553a025784fbb45e4fe420a00ef950 100644 (file)
@@ -55,7 +55,7 @@ struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
 
 void ksmbd_free_user(struct ksmbd_user *user)
 {
-       ksmbd_ipc_logout_request(user->name);
+       ksmbd_ipc_logout_request(user->name, user->flags);
        kfree(user->name);
        kfree(user->passkey);
        kfree(user);
index b2bb074a0150f02a225b1080808d3d93aaa65e3a..aff80b0295790c005a33f6ba6b4a4d4f84f2ef29 100644 (file)
@@ -18,6 +18,7 @@ struct ksmbd_user {
 
        size_t                  passkey_sz;
        char                    *passkey;
+       unsigned int            failed_login_count;
 };
 
 static inline bool user_guest(struct ksmbd_user *user)
index 6a19f4bc692d3008f47e1d49a4de46f7312def28..60e7ac62c9172f5cac831b677f184f045a4a0245 100644 (file)
@@ -162,17 +162,14 @@ char *convert_to_nt_pathname(char *filename)
 {
        char *ab_pathname;
 
-       if (strlen(filename) == 0) {
-               ab_pathname = kmalloc(2, GFP_KERNEL);
-               ab_pathname[0] = '\\';
-               ab_pathname[1] = '\0';
-       } else {
-               ab_pathname = kstrdup(filename, GFP_KERNEL);
-               if (!ab_pathname)
-                       return NULL;
+       if (strlen(filename) == 0)
+               filename = "\\";
 
-               ksmbd_conv_path_to_windows(ab_pathname);
-       }
+       ab_pathname = kstrdup(filename, GFP_KERNEL);
+       if (!ab_pathname)
+               return NULL;
+
+       ksmbd_conv_path_to_windows(ab_pathname);
        return ab_pathname;
 }
 
index 16b6236d1bd20a557874e54a31304bc9c49d4de6..f9dae6ef21150ae9f26129001bb3c90e35195afc 100644 (file)
@@ -1451,26 +1451,47 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
  */
 struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
 {
-       char *data_offset;
        struct create_context *cc;
        unsigned int next = 0;
        char *name;
        struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+       unsigned int remain_len, name_off, name_len, value_off, value_len,
+                    cc_len;
 
-       data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
-       cc = (struct create_context *)data_offset;
+       /*
+        * CreateContextsOffset and CreateContextsLength are guaranteed to
+        * be valid because of ksmbd_smb2_check_message().
+        */
+       cc = (struct create_context *)((char *)req + 4 +
+                                      le32_to_cpu(req->CreateContextsOffset));
+       remain_len = le32_to_cpu(req->CreateContextsLength);
        do {
-               int val;
-
                cc = (struct create_context *)((char *)cc + next);
-               name = le16_to_cpu(cc->NameOffset) + (char *)cc;
-               val = le16_to_cpu(cc->NameLength);
-               if (val < 4)
+               if (remain_len < offsetof(struct create_context, Buffer))
                        return ERR_PTR(-EINVAL);
 
-               if (memcmp(name, tag, val) == 0)
-                       return cc;
                next = le32_to_cpu(cc->Next);
+               name_off = le16_to_cpu(cc->NameOffset);
+               name_len = le16_to_cpu(cc->NameLength);
+               value_off = le16_to_cpu(cc->DataOffset);
+               value_len = le32_to_cpu(cc->DataLength);
+               cc_len = next ? next : remain_len;
+
+               if ((next & 0x7) != 0 ||
+                   next > remain_len ||
+                   name_off != offsetof(struct create_context, Buffer) ||
+                   name_len < 4 ||
+                   name_off + name_len > cc_len ||
+                   (value_off & 0x7) != 0 ||
+                   (value_off && (value_off < name_off + name_len)) ||
+                   ((u64)value_off + value_len > cc_len))
+                       return ERR_PTR(-EINVAL);
+
+               name = (char *)cc + name_off;
+               if (memcmp(name, tag, name_len) == 0)
+                       return cc;
+
+               remain_len -= next;
        } while (next != 0);
 
        return NULL;
index 9aa46bb3e10d45d700af6eeb1a80106637883e82..030ca57c378498fe742c1ff34a0819b0fc2ac73c 100644 (file)
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
 };
 
 /*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set length of the data area and the offset to arguments.
+ * If they are invalid, return an error.
  */
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+                                 struct smb2_hdr *hdr)
 {
+       int ret = 0;
+
        *off = 0;
        *len = 0;
 
        /* error reqeusts do not have data area */
        if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
-               return NULL;
+               return ret;
 
        /*
         * Following commands have data areas so we have to get the location
@@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
        case SMB2_IOCTL:
                *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
                *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
-
                break;
        default:
                ksmbd_debug(SMB, "no length check for command\n");
                break;
        }
 
-       /*
-        * Invalid length or offset probably means data area is invalid, but
-        * we have little choice but to ignore the data area in this case.
-        */
        if (*off > 4096) {
-               ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
-                           *off);
-               *len = 0;
-               *off = 0;
-       } else if (*off < 0) {
-               ksmbd_debug(SMB,
-                           "negative offset %d to data invalid ignore data area\n",
-                           *off);
-               *off = 0;
-               *len = 0;
-       } else if (*len < 0) {
-               ksmbd_debug(SMB,
-                           "negative data length %d invalid, data area ignored\n",
-                           *len);
-               *len = 0;
-       } else if (*len > 128 * 1024) {
-               ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
-               *len = 0;
+               ksmbd_debug(SMB, "offset %d too large\n", *off);
+               ret = -EINVAL;
+       } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+               ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+                           MAX_STREAM_PROT_LEN, (u64)*off + *len);
+               ret = -EINVAL;
        }
 
-       /* return pointer to beginning of data area, ie offset from SMB start */
-       if ((*off != 0) && (*len != 0))
-               return (char *)hdr + *off;
-       else
-               return NULL;
+       return ret;
 }
 
 /*
  * Calculate the size of the SMB message based on the fixed header
  * portion, the number of word parameters and the data portion of the message.
  */
-static unsigned int smb2_calc_size(void *buf)
+static int smb2_calc_size(void *buf, unsigned int *len)
 {
        struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
        struct smb2_hdr *hdr = &pdu->hdr;
-       int offset; /* the offset from the beginning of SMB to data area */
-       int data_length; /* the length of the variable length data area */
+       unsigned int offset; /* the offset from the beginning of SMB to data area */
+       unsigned int data_length; /* the length of the variable length data area */
+       int ret;
+
        /* Structure Size has already been checked to make sure it is 64 */
-       int len = le16_to_cpu(hdr->StructureSize);
+       *len = le16_to_cpu(hdr->StructureSize);
 
        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
-       len += le16_to_cpu(pdu->StructureSize2);
+       *len += le16_to_cpu(pdu->StructureSize2);
+       /*
+        * StructureSize2 of smb2_lock pdu is set to 48, indicating
+        * the size of smb2 lock request with single smb2_lock_element
+        * regardless of number of locks. Subtract single
+        * smb2_lock_element for correct buffer size check.
+        */
+       if (hdr->Command == SMB2_LOCK)
+               *len -= sizeof(struct smb2_lock_element);
 
        if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
                goto calc_size_exit;
 
-       smb2_get_data_area_len(&offset, &data_length, hdr);
-       ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+       ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+       if (ret)
+               return ret;
+       ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
                    offset);
 
        if (data_length > 0) {
@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
-               if (offset + 1 < len)
+               if (offset + 1 < *len) {
                        ksmbd_debug(SMB,
-                                   "data area offset %d overlaps SMB2 header %d\n",
-                                   offset + 1, len);
-               else
-                       len = offset + data_length;
+                                   "data area offset %d overlaps SMB2 header %u\n",
+                                   offset + 1, *len);
+                       return -EINVAL;
+               }
+
+               *len = offset + data_length;
        }
+
 calc_size_exit:
-       ksmbd_debug(SMB, "SMB2 len %d\n", len);
-       return len;
+       ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+       return 0;
 }
 
 static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
@@ -287,11 +284,13 @@ static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
                le32_to_cpu(h->MaxOutputResponse);
 }
 
-static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
+static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
+                                      struct smb2_hdr *hdr)
 {
-       int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
-       int credit_charge = le16_to_cpu(hdr->CreditCharge);
+       unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
+       unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
        void *__hdr = hdr;
+       int ret;
 
        switch (hdr->Command) {
        case SMB2_QUERY_INFO:
@@ -313,21 +312,37 @@ static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
                req_len = smb2_ioctl_req_len(__hdr);
                expect_resp_len = smb2_ioctl_resp_len(__hdr);
                break;
-       default:
+       case SMB2_CANCEL:
                return 0;
+       default:
+               req_len = 1;
+               break;
        }
 
-       credit_charge = max(1, credit_charge);
-       max_len = max(req_len, expect_resp_len);
+       credit_charge = max_t(unsigned short, credit_charge, 1);
+       max_len = max_t(unsigned int, req_len, expect_resp_len);
        calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
 
        if (credit_charge < calc_credit_num) {
-               pr_err("Insufficient credit charge, given: %d, needed: %d\n",
-                      credit_charge, calc_credit_num);
+               ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
+                           credit_charge, calc_credit_num);
+               return 1;
+       } else if (credit_charge > conn->max_credits) {
+               ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
                return 1;
        }
 
-       return 0;
+       spin_lock(&conn->credits_lock);
+       if (credit_charge <= conn->total_credits) {
+               conn->total_credits -= credit_charge;
+               ret = 0;
+       } else {
+               ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+                           credit_charge, conn->total_credits);
+               ret = 1;
+       }
+       spin_unlock(&conn->credits_lock);
+       return ret;
 }
 
 int ksmbd_smb2_check_message(struct ksmbd_work *work)
@@ -385,24 +400,20 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                }
        }
 
-       if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
-           smb2_validate_credit_charge(hdr)) {
-               work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+       if (smb2_calc_size(hdr, &clc_len))
                return 1;
-       }
 
-       clc_len = smb2_calc_size(hdr);
        if (len != clc_len) {
-               /* server can return one byte more due to implied bcc[0] */
+               /* client can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
-                       return 0;
+                       goto validate_credit;
 
                /*
                 * Some windows servers (win2016) will pad also the final
                 * PDU in a compound to 8 bytes.
                 */
                if (ALIGN(clc_len, 8) == len)
-                       return 0;
+                       goto validate_credit;
 
                /*
                 * windows client also pad up to 8 bytes when compounding.
@@ -415,12 +426,9 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                                    "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
                                    len, clc_len, command,
                                    le64_to_cpu(hdr->MessageId));
-                       return 0;
+                       goto validate_credit;
                }
 
-               if (command == SMB2_LOCK_HE && len == 88)
-                       return 0;
-
                ksmbd_debug(SMB,
                            "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
                            len, clc_len, command,
@@ -429,6 +437,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 1;
        }
 
+validate_credit:
+       if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+           smb2_validate_credit_charge(work->conn, hdr)) {
+               work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+               return 1;
+       }
+
        return 0;
 }
 
index 197473871aa47b6718b75dce17933a2bbb23709e..fb6a65d231391b9204fb92772935fa592b0b321e 100644 (file)
@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
        [SMB2_CHANGE_NOTIFY_HE] =       { .proc = smb2_notify},
 };
 
-int init_smb2_0_server(struct ksmbd_conn *conn)
-{
-       return -EOPNOTSUPP;
-}
-
 /**
  * init_smb2_1_server() - initialize a smb server connection with smb2.1
  *                     command dispatcher
@@ -289,6 +284,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
 
 void init_smb2_max_read_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_read_size = sz;
        smb30_server_values.max_read_size = sz;
        smb302_server_values.max_read_size = sz;
@@ -297,6 +293,7 @@ void init_smb2_max_read_size(unsigned int sz)
 
 void init_smb2_max_write_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_write_size = sz;
        smb30_server_values.max_write_size = sz;
        smb302_server_values.max_write_size = sz;
@@ -305,6 +302,7 @@ void init_smb2_max_write_size(unsigned int sz)
 
 void init_smb2_max_trans_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_trans_size = sz;
        smb30_server_values.max_trans_size = sz;
        smb302_server_values.max_trans_size = sz;
index 761e12171dc4418fb2594bea381fc2a93df57f69..7e448df3f8474c49cea0e1a361f4220caf83abca 100644 (file)
@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
 
        if (conn->need_neg == false)
                return -EINVAL;
-       if (!(conn->dialect >= SMB20_PROT_ID &&
-             conn->dialect <= SMB311_PROT_ID))
-               return -EINVAL;
 
        rsp_hdr = work->response_buf;
 
@@ -295,22 +292,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
        return 0;
 }
 
-static int smb2_consume_credit_charge(struct ksmbd_work *work,
-                                     unsigned short credit_charge)
-{
-       struct ksmbd_conn *conn = work->conn;
-       unsigned int rsp_credits = 1;
-
-       if (!conn->total_credits)
-               return 0;
-
-       if (credit_charge > 0)
-               rsp_credits = credit_charge;
-
-       conn->total_credits -= rsp_credits;
-       return rsp_credits;
-}
-
 /**
  * smb2_set_rsp_credits() - set number of credits in response buffer
  * @work:      smb work containing smb response buffer
@@ -320,49 +301,43 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
        struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
        struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
        struct ksmbd_conn *conn = work->conn;
-       unsigned short credits_requested = le16_to_cpu(req_hdr->CreditRequest);
-       unsigned short credit_charge = 1, credits_granted = 0;
-       unsigned short aux_max, aux_credits, min_credits;
-       int rsp_credit_charge;
+       unsigned short credits_requested;
+       unsigned short credit_charge, credits_granted = 0;
+       unsigned short aux_max, aux_credits;
 
-       if (hdr->Command == SMB2_CANCEL)
-               goto out;
+       if (work->send_no_response)
+               return 0;
 
-       /* get default minimum credits by shifting maximum credits by 4 */
-       min_credits = conn->max_credits >> 4;
+       hdr->CreditCharge = req_hdr->CreditCharge;
 
-       if (conn->total_credits >= conn->max_credits) {
+       if (conn->total_credits > conn->max_credits) {
+               hdr->CreditRequest = 0;
                pr_err("Total credits overflow: %d\n", conn->total_credits);
-               conn->total_credits = min_credits;
-       }
-
-       rsp_credit_charge =
-               smb2_consume_credit_charge(work, le16_to_cpu(req_hdr->CreditCharge));
-       if (rsp_credit_charge < 0)
                return -EINVAL;
+       }
 
-       hdr->CreditCharge = cpu_to_le16(rsp_credit_charge);
+       credit_charge = max_t(unsigned short,
+                             le16_to_cpu(req_hdr->CreditCharge), 1);
+       credits_requested = max_t(unsigned short,
+                                 le16_to_cpu(req_hdr->CreditRequest), 1);
 
-       if (credits_requested > 0) {
-               aux_credits = credits_requested - 1;
-               aux_max = 32;
-               if (hdr->Command == SMB2_NEGOTIATE)
-                       aux_max = 0;
-               aux_credits = (aux_credits < aux_max) ? aux_credits : aux_max;
-               credits_granted = aux_credits + credit_charge;
+       /* according to smb2.credits smbtorture, Windows server
+        * 2016 or later grant up to 8192 credits at once.
+        *
+        * TODO: Need to adjust CreditRequest value according to
+        * current cpu load
+        */
+       aux_credits = credits_requested - 1;
+       if (hdr->Command == SMB2_NEGOTIATE)
+               aux_max = 0;
+       else
+               aux_max = conn->max_credits - credit_charge;
+       aux_credits = min_t(unsigned short, aux_credits, aux_max);
+       credits_granted = credit_charge + aux_credits;
 
-               /* if credits granted per client is getting bigger than default
-                * minimum credits then we should wrap it up within the limits.
-                */
-               if ((conn->total_credits + credits_granted) > min_credits)
-                       credits_granted = min_credits - conn->total_credits;
-               /*
-                * TODO: Need to adjuct CreditRequest value according to
-                * current cpu load
-                */
-       } else if (conn->total_credits == 0) {
-               credits_granted = 1;
-       }
+       if (conn->max_credits - conn->total_credits < credits_granted)
+               credits_granted = conn->max_credits -
+                       conn->total_credits;
 
        conn->total_credits += credits_granted;
        work->credits_granted += credits_granted;
@@ -371,7 +346,6 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
                /* Update CreditRequest in last request */
                hdr->CreditRequest = cpu_to_le16(work->credits_granted);
        }
-out:
        ksmbd_debug(SMB,
                    "credits: requested[%d] granted[%d] total_granted[%d]\n",
                    credits_requested, credits_granted,
@@ -459,13 +433,28 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
 bool is_chained_smb2_message(struct ksmbd_work *work)
 {
        struct smb2_hdr *hdr = work->request_buf;
-       unsigned int len;
+       unsigned int len, next_cmd;
 
        if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
                return false;
 
        hdr = ksmbd_req_buf_next(work);
-       if (le32_to_cpu(hdr->NextCommand) > 0) {
+       next_cmd = le32_to_cpu(hdr->NextCommand);
+       if (next_cmd > 0) {
+               if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
+                       __SMB2_HEADER_STRUCTURE_SIZE >
+                   get_rfc1002_len(work->request_buf)) {
+                       pr_err("next command(%u) offset exceeds smb msg size\n",
+                              next_cmd);
+                       return false;
+               }
+
+               if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
+                   work->response_sz) {
+                       pr_err("next response offset exceeds response buffer size\n");
+                       return false;
+               }
+
                ksmbd_debug(SMB, "got SMB2 chained command\n");
                init_chained_smb2_rsp(work);
                return true;
@@ -535,7 +524,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
 {
        struct smb2_hdr *hdr = work->request_buf;
        size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
-       size_t large_sz = work->conn->vals->max_trans_size + MAX_SMB2_HDR_SIZE;
+       size_t large_sz = small_sz + work->conn->vals->max_trans_size;
        size_t sz = small_sz;
        int cmd = le16_to_cpu(hdr->Command);
 
@@ -1058,6 +1047,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        struct smb2_negotiate_req *req = work->request_buf;
        struct smb2_negotiate_rsp *rsp = work->response_buf;
        int rc = 0;
+       unsigned int smb2_buf_len, smb2_neg_size;
        __le32 status;
 
        ksmbd_debug(SMB, "Received negotiate request\n");
@@ -1075,6 +1065,44 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
                goto err_out;
        }
 
+       smb2_buf_len = get_rfc1002_len(work->request_buf);
+       smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+       if (smb2_neg_size > smb2_buf_len) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               rc = -EINVAL;
+               goto err_out;
+       }
+
+       if (conn->dialect == SMB311_PROT_ID) {
+               unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
+
+               if (smb2_buf_len < nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+
+               if (smb2_neg_size > nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+       } else {
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   smb2_buf_len) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+       }
+
        conn->cli_cap = le32_to_cpu(req->Capabilities);
        switch (conn->dialect) {
        case SMB311_PROT_ID:
@@ -1118,13 +1146,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        case SMB21_PROT_ID:
                init_smb2_1_server(conn);
                break;
-       case SMB20_PROT_ID:
-               rc = init_smb2_0_server(conn);
-               if (rc) {
-                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
-                       goto err_out;
-               }
-               break;
        case SMB2X_PROT_ID:
        case BAD_PROT_ID:
        default:
@@ -1143,11 +1164,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
        rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               memcpy(conn->ClientGUID, req->ClientGUID,
-                      SMB2_CLIENT_GUID_SIZE);
-               conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
-       }
+       memcpy(conn->ClientGUID, req->ClientGUID,
+                       SMB2_CLIENT_GUID_SIZE);
+       conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
 
        rsp->StructureSize = cpu_to_le16(65);
        rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1238,19 +1257,13 @@ static int generate_preauth_hash(struct ksmbd_work *work)
        return 0;
 }
 
-static int decode_negotiation_token(struct ksmbd_work *work,
-                                   struct negotiate_message *negblob)
+static int decode_negotiation_token(struct ksmbd_conn *conn,
+                                   struct negotiate_message *negblob,
+                                   size_t sz)
 {
-       struct ksmbd_conn *conn = work->conn;
-       struct smb2_sess_setup_req *req;
-       int sz;
-
        if (!conn->use_spnego)
                return -EINVAL;
 
-       req = work->request_buf;
-       sz = le16_to_cpu(req->SecurityBufferLength);
-
        if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
                if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
                        conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
@@ -1262,9 +1275,9 @@ static int decode_negotiation_token(struct ksmbd_work *work,
 }
 
 static int ntlm_negotiate(struct ksmbd_work *work,
-                         struct negotiate_message *negblob)
+                         struct negotiate_message *negblob,
+                         size_t negblob_len)
 {
-       struct smb2_sess_setup_req *req = work->request_buf;
        struct smb2_sess_setup_rsp *rsp = work->response_buf;
        struct challenge_message *chgblob;
        unsigned char *spnego_blob = NULL;
@@ -1273,8 +1286,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
        int sz, rc;
 
        ksmbd_debug(SMB, "negotiate phase\n");
-       sz = le16_to_cpu(req->SecurityBufferLength);
-       rc = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, work->sess);
+       rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->sess);
        if (rc)
                return rc;
 
@@ -1342,12 +1354,23 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        struct authenticate_message *authblob;
        struct ksmbd_user *user;
        char *name;
-       int sz;
+       unsigned int auth_msg_len, name_off, name_len, secbuf_len;
 
+       secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+       if (secbuf_len < sizeof(struct authenticate_message)) {
+               ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+               return NULL;
+       }
        authblob = user_authblob(conn, req);
-       sz = le32_to_cpu(authblob->UserName.BufferOffset);
-       name = smb_strndup_from_utf16((const char *)authblob + sz,
-                                     le16_to_cpu(authblob->UserName.Length),
+       name_off = le32_to_cpu(authblob->UserName.BufferOffset);
+       name_len = le16_to_cpu(authblob->UserName.Length);
+       auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
+
+       if (auth_msg_len < (u64)name_off + name_len)
+               return NULL;
+
+       name = smb_strndup_from_utf16((const char *)authblob + name_off,
+                                     name_len,
                                      true,
                                      conn->local_nls);
        if (IS_ERR(name)) {
@@ -1489,11 +1512,9 @@ binding_session:
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -1575,11 +1596,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -1597,6 +1616,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
        struct smb2_sess_setup_rsp *rsp = work->response_buf;
        struct ksmbd_session *sess;
        struct negotiate_message *negblob;
+       unsigned int negblob_len, negblob_off;
        int rc = 0;
 
        ksmbd_debug(SMB, "Received request for session setup\n");
@@ -1677,10 +1697,16 @@ int smb2_sess_setup(struct ksmbd_work *work)
        if (sess->state == SMB2_SESSION_EXPIRED)
                sess->state = SMB2_SESSION_IN_PROGRESS;
 
+       negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+       negblob_len = le16_to_cpu(req->SecurityBufferLength);
+       if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
+           negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
+               return -EINVAL;
+
        negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
-                       le16_to_cpu(req->SecurityBufferOffset));
+                       negblob_off);
 
-       if (decode_negotiation_token(work, negblob) == 0) {
+       if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
                if (conn->mechToken)
                        negblob = (struct negotiate_message *)conn->mechToken;
        }
@@ -1704,7 +1730,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
                        sess->Preauth_HashValue = NULL;
                } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
                        if (negblob->MessageType == NtLmNegotiate) {
-                               rc = ntlm_negotiate(work, negblob);
+                               rc = ntlm_negotiate(work, negblob, negblob_len);
                                if (rc)
                                        goto out_err;
                                rsp->hdr.Status =
@@ -1764,9 +1790,30 @@ out_err:
                conn->mechToken = NULL;
        }
 
-       if (rc < 0 && sess) {
-               ksmbd_session_destroy(sess);
-               work->sess = NULL;
+       if (rc < 0) {
+               /*
+                * SecurityBufferOffset should be set to zero
+                * in session setup error response.
+                */
+               rsp->SecurityBufferOffset = 0;
+
+               if (sess) {
+                       bool try_delay = false;
+
+                       /*
+                        * To avoid dictionary attacks (repeated session setups rapidly sent) to
+                        * connect to server, ksmbd make a delay of a 5 seconds on session setup
+                        * failure to make it harder to send enough random connection requests
+                        * to break into a server.
+                        */
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
+
+                       ksmbd_session_destroy(sess);
+                       work->sess = NULL;
+                       if (try_delay)
+                               ssleep(5);
+               }
        }
 
        return rc;
@@ -2093,16 +2140,22 @@ out:
  * smb2_set_ea() - handler for setting extended attributes using set
  *             info command
  * @eabuf:     set info command buffer
+ * @buf_len:   set info command buffer length
  * @path:      dentry path for get ea
  *
  * Return:     0 on success, otherwise error
  */
-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+                      struct path *path)
 {
        struct user_namespace *user_ns = mnt_user_ns(path->mnt);
        char *attr_name = NULL, *value;
        int rc = 0;
-       int next = 0;
+       unsigned int next = 0;
+
+       if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+                       le16_to_cpu(eabuf->EaValueLength))
+               return -EINVAL;
 
        attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
        if (!attr_name)
@@ -2167,7 +2220,13 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
 
 next:
                next = le32_to_cpu(eabuf->NextEntryOffset);
+               if (next == 0 || buf_len < next)
+                       break;
+               buf_len -= next;
                eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+               if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+                       break;
+
        } while (next != 0);
 
        kfree(attr_name);
@@ -2367,6 +2426,10 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
        ksmbd_debug(SMB,
                    "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
        sd_buf = (struct create_sd_buf_req *)context;
+       if (le16_to_cpu(context->DataOffset) +
+           le32_to_cpu(context->DataLength) <
+           sizeof(struct create_sd_buf_req))
+               return -EINVAL;
        return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
                            le32_to_cpu(sd_buf->ccontext.DataLength), true);
 }
@@ -2561,6 +2624,12 @@ int smb2_open(struct ksmbd_work *work)
                        goto err_out1;
                } else if (context) {
                        ea_buf = (struct create_ea_buf_req *)context;
+                       if (le16_to_cpu(context->DataOffset) +
+                           le32_to_cpu(context->DataLength) <
+                           sizeof(struct create_ea_buf_req)) {
+                               rc = -EINVAL;
+                               goto err_out1;
+                       }
                        if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
                                rsp->hdr.Status = STATUS_ACCESS_DENIED;
                                rc = -EACCES;
@@ -2599,6 +2668,12 @@ int smb2_open(struct ksmbd_work *work)
                        } else if (context) {
                                struct create_posix *posix =
                                        (struct create_posix *)context;
+                               if (le16_to_cpu(context->DataOffset) +
+                                   le32_to_cpu(context->DataLength) <
+                                   sizeof(struct create_posix)) {
+                                       rc = -EINVAL;
+                                       goto err_out1;
+                               }
                                ksmbd_debug(SMB, "get posix context\n");
 
                                posix_mode = le32_to_cpu(posix->Mode);
@@ -2748,7 +2823,15 @@ int smb2_open(struct ksmbd_work *work)
                created = true;
                user_ns = mnt_user_ns(path.mnt);
                if (ea_buf) {
-                       rc = smb2_set_ea(&ea_buf->ea, &path);
+                       if (le32_to_cpu(ea_buf->ccontext.DataLength) <
+                           sizeof(struct smb2_ea_info)) {
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
+
+                       rc = smb2_set_ea(&ea_buf->ea,
+                                        le32_to_cpu(ea_buf->ccontext.DataLength),
+                                        &path);
                        if (rc == -EOPNOTSUPP)
                                rc = 0;
                        else if (rc)
@@ -2981,9 +3064,16 @@ int smb2_open(struct ksmbd_work *work)
                        rc = PTR_ERR(az_req);
                        goto err_out;
                } else if (az_req) {
-                       loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);
+                       loff_t alloc_size;
                        int err;
 
+                       if (le16_to_cpu(az_req->ccontext.DataOffset) +
+                           le32_to_cpu(az_req->ccontext.DataLength) <
+                           sizeof(struct create_alloc_size_req)) {
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
+                       alloc_size = le64_to_cpu(az_req->AllocationSize);
                        ksmbd_debug(SMB,
                                    "request smb2 create allocate size : %llu\n",
                                    alloc_size);
@@ -3704,6 +3794,24 @@ static int verify_info_level(int info_level)
        return 0;
 }
 
+static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
+                                    unsigned short hdr2_len,
+                                    unsigned int out_buf_len)
+{
+       int free_len;
+
+       if (out_buf_len > work->conn->vals->max_trans_size)
+               return -EINVAL;
+
+       free_len = (int)(work->response_sz -
+                        (get_rfc1002_len(work->response_buf) + 4)) -
+               hdr2_len;
+       if (free_len < 0)
+               return -EINVAL;
+
+       return min_t(int, out_buf_len, free_len);
+}
+
 int smb2_query_dir(struct ksmbd_work *work)
 {
        struct ksmbd_conn *conn = work->conn;
@@ -3780,9 +3888,13 @@ int smb2_query_dir(struct ksmbd_work *work)
        memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
        d_info.wptr = (char *)rsp->Buffer;
        d_info.rptr = (char *)rsp->Buffer;
-       d_info.out_buf_len = (work->response_sz - (get_rfc1002_len(rsp_org) + 4));
-       d_info.out_buf_len = min_t(int, d_info.out_buf_len, le32_to_cpu(req->OutputBufferLength)) -
-               sizeof(struct smb2_query_directory_rsp);
+       d_info.out_buf_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (d_info.out_buf_len < 0) {
+               rc = -EINVAL;
+               goto err_out;
+       }
        d_info.flags = srch_flag;
 
        /*
@@ -4016,12 +4128,11 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
                                    le32_to_cpu(req->Flags));
        }
 
-       buf_free_len = work->response_sz -
-                       (get_rfc1002_len(rsp_org) + 4) -
-                       sizeof(struct smb2_query_info_rsp);
-
-       if (le32_to_cpu(req->OutputBufferLength) < buf_free_len)
-               buf_free_len = le32_to_cpu(req->OutputBufferLength);
+       buf_free_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (buf_free_len < 0)
+               return -EINVAL;
 
        rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
        if (rc < 0) {
@@ -4152,7 +4263,7 @@ static void get_file_access_info(struct smb2_query_info_rsp *rsp,
 static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
                               struct ksmbd_file *fp, void *rsp_org)
 {
-       struct smb2_file_all_info *basic_info;
+       struct smb2_file_basic_info *basic_info;
        struct kstat stat;
        u64 time;
 
@@ -4162,7 +4273,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
                return -EACCES;
        }
 
-       basic_info = (struct smb2_file_all_info *)rsp->Buffer;
+       basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
        generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
                         &stat);
        basic_info->CreationTime = cpu_to_le64(fp->create_time);
@@ -4175,9 +4286,8 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
        basic_info->Attributes = fp->f_ci->m_fattr;
        basic_info->Pad1 = 0;
        rsp->OutputBufferLength =
-               cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));
-       inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,
-                                         AllocationSize));
+               cpu_to_le32(sizeof(struct smb2_file_basic_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
        return 0;
 }
 
@@ -4333,6 +4443,8 @@ static void get_file_stream_info(struct ksmbd_work *work,
        struct path *path = &fp->filp->f_path;
        ssize_t xattr_list_len;
        int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+       int buf_free_len;
+       struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
 
        generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
                         &stat);
@@ -4346,6 +4458,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
                goto out;
        }
 
+       buf_free_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (buf_free_len < 0)
+               goto out;
+
        while (idx < xattr_list_len) {
                stream_name = xattr_list + idx;
                streamlen = strlen(stream_name);
@@ -4370,6 +4488,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
                streamlen = snprintf(stream_buf, streamlen + 1,
                                     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
 
+               next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
+               if (next > buf_free_len)
+                       break;
+
                file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
                streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
                                               stream_buf, streamlen,
@@ -4380,12 +4502,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
                file_info->StreamSize = cpu_to_le64(stream_name_len);
                file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
 
-               next = sizeof(struct smb2_file_stream_info) + streamlen;
                nbytes += next;
+               buf_free_len -= next;
                file_info->NextEntryOffset = cpu_to_le32(next);
        }
 
-       if (!S_ISDIR(stat.mode)) {
+       if (!S_ISDIR(stat.mode) &&
+           buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
                file_info = (struct smb2_file_stream_info *)
                        &rsp->Buffer[nbytes];
                streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -5333,7 +5456,7 @@ out:
 static int smb2_create_link(struct ksmbd_work *work,
                            struct ksmbd_share_config *share,
                            struct smb2_file_link_info *file_info,
-                           struct file *filp,
+                           unsigned int buf_len, struct file *filp,
                            struct nls_table *local_nls)
 {
        char *link_name = NULL, *target_name = NULL, *pathname = NULL;
@@ -5341,6 +5464,10 @@ static int smb2_create_link(struct ksmbd_work *work,
        bool file_present = true;
        int rc;
 
+       if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+                       le32_to_cpu(file_info->FileNameLength))
+               return -EINVAL;
+
        ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
        pathname = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathname)
@@ -5400,12 +5527,11 @@ out:
        return rc;
 }
 
-static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
+static int set_file_basic_info(struct ksmbd_file *fp,
+                              struct smb2_file_basic_info *file_info,
                               struct ksmbd_share_config *share)
 {
-       struct smb2_file_all_info *file_info;
        struct iattr attrs;
-       struct timespec64 ctime;
        struct file *filp;
        struct inode *inode;
        struct user_namespace *user_ns;
@@ -5414,7 +5540,6 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
        if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
                return -EACCES;
 
-       file_info = (struct smb2_file_all_info *)buf;
        attrs.ia_valid = 0;
        filp = fp->filp;
        inode = file_inode(filp);
@@ -5428,13 +5553,11 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
                attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
        }
 
-       if (file_info->ChangeTime) {
+       attrs.ia_valid |= ATTR_CTIME;
+       if (file_info->ChangeTime)
                attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
-               ctime = attrs.ia_ctime;
-               attrs.ia_valid |= ATTR_CTIME;
-       } else {
-               ctime = inode->i_ctime;
-       }
+       else
+               attrs.ia_ctime = inode->i_ctime;
 
        if (file_info->LastWriteTime) {
                attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5480,18 +5603,17 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
                        return -EACCES;
 
                inode_lock(inode);
+               inode->i_ctime = attrs.ia_ctime;
+               attrs.ia_valid &= ~ATTR_CTIME;
                rc = notify_change(user_ns, dentry, &attrs, NULL);
-               if (!rc) {
-                       inode->i_ctime = ctime;
-                       mark_inode_dirty(inode);
-               }
                inode_unlock(inode);
        }
        return rc;
 }
 
 static int set_file_allocation_info(struct ksmbd_work *work,
-                                   struct ksmbd_file *fp, char *buf)
+                                   struct ksmbd_file *fp,
+                                   struct smb2_file_alloc_info *file_alloc_info)
 {
        /*
         * TODO : It's working fine only when store dos attributes
@@ -5499,7 +5621,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
         * properly with any smb.conf option
         */
 
-       struct smb2_file_alloc_info *file_alloc_info;
        loff_t alloc_blks;
        struct inode *inode;
        int rc;
@@ -5507,7 +5628,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
        if (!(fp->daccess & FILE_WRITE_DATA_LE))
                return -EACCES;
 
-       file_alloc_info = (struct smb2_file_alloc_info *)buf;
        alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
        inode = file_inode(fp->filp);
 
@@ -5543,9 +5663,8 @@ static int set_file_allocation_info(struct ksmbd_work *work,
 }
 
 static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
-                               char *buf)
+                               struct smb2_file_eof_info *file_eof_info)
 {
-       struct smb2_file_eof_info *file_eof_info;
        loff_t newsize;
        struct inode *inode;
        int rc;
@@ -5553,7 +5672,6 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (!(fp->daccess & FILE_WRITE_DATA_LE))
                return -EACCES;
 
-       file_eof_info = (struct smb2_file_eof_info *)buf;
        newsize = le64_to_cpu(file_eof_info->EndOfFile);
        inode = file_inode(fp->filp);
 
@@ -5580,7 +5698,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
 }
 
 static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
-                          char *buf)
+                          struct smb2_file_rename_info *rename_info,
+                          unsigned int buf_len)
 {
        struct user_namespace *user_ns;
        struct ksmbd_file *parent_fp;
@@ -5593,6 +5712,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
                return -EACCES;
        }
 
+       if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
+                       le32_to_cpu(rename_info->FileNameLength))
+               return -EINVAL;
+
        user_ns = file_mnt_user_ns(fp->filp);
        if (ksmbd_stream_fd(fp))
                goto next;
@@ -5615,14 +5738,13 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
                }
        }
 next:
-       return smb2_rename(work, fp, user_ns,
-                          (struct smb2_file_rename_info *)buf,
+       return smb2_rename(work, fp, user_ns, rename_info,
                           work->sess->conn->local_nls);
 }
 
-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
+static int set_file_disposition_info(struct ksmbd_file *fp,
+                                    struct smb2_file_disposition_info *file_info)
 {
-       struct smb2_file_disposition_info *file_info;
        struct inode *inode;
 
        if (!(fp->daccess & FILE_DELETE_LE)) {
@@ -5631,7 +5753,6 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
        }
 
        inode = file_inode(fp->filp);
-       file_info = (struct smb2_file_disposition_info *)buf;
        if (file_info->DeletePending) {
                if (S_ISDIR(inode->i_mode) &&
                    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
@@ -5643,15 +5764,14 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
        return 0;
 }
 
-static int set_file_position_info(struct ksmbd_file *fp, char *buf)
+static int set_file_position_info(struct ksmbd_file *fp,
+                                 struct smb2_file_pos_info *file_info)
 {
-       struct smb2_file_pos_info *file_info;
        loff_t current_byte_offset;
        unsigned long sector_size;
        struct inode *inode;
 
        inode = file_inode(fp->filp);
-       file_info = (struct smb2_file_pos_info *)buf;
        current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
        sector_size = inode->i_sb->s_blocksize;
 
@@ -5667,12 +5787,11 @@ static int set_file_position_info(struct ksmbd_file *fp, char *buf)
        return 0;
 }
 
-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
+static int set_file_mode_info(struct ksmbd_file *fp,
+                             struct smb2_file_mode_info *file_info)
 {
-       struct smb2_file_mode_info *file_info;
        __le32 mode;
 
-       file_info = (struct smb2_file_mode_info *)buf;
        mode = file_info->Mode;
 
        if ((mode & ~FILE_MODE_INFO_MASK) ||
@@ -5702,40 +5821,74 @@ static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
  * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH
  */
 static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
-                             int info_class, char *buf,
+                             struct smb2_set_info_req *req,
                              struct ksmbd_share_config *share)
 {
-       switch (info_class) {
+       unsigned int buf_len = le32_to_cpu(req->BufferLength);
+
+       switch (req->FileInfoClass) {
        case FILE_BASIC_INFORMATION:
-               return set_file_basic_info(fp, buf, share);
+       {
+               if (buf_len < sizeof(struct smb2_file_basic_info))
+                       return -EINVAL;
 
+               return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+       }
        case FILE_ALLOCATION_INFORMATION:
-               return set_file_allocation_info(work, fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_alloc_info))
+                       return -EINVAL;
 
+               return set_file_allocation_info(work, fp,
+                                               (struct smb2_file_alloc_info *)req->Buffer);
+       }
        case FILE_END_OF_FILE_INFORMATION:
-               return set_end_of_file_info(work, fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_eof_info))
+                       return -EINVAL;
 
+               return set_end_of_file_info(work, fp,
+                                           (struct smb2_file_eof_info *)req->Buffer);
+       }
        case FILE_RENAME_INFORMATION:
+       {
                if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
                        ksmbd_debug(SMB,
                                    "User does not have write permission\n");
                        return -EACCES;
                }
-               return set_rename_info(work, fp, buf);
 
+               if (buf_len < sizeof(struct smb2_file_rename_info))
+                       return -EINVAL;
+
+               return set_rename_info(work, fp,
+                                      (struct smb2_file_rename_info *)req->Buffer,
+                                      buf_len);
+       }
        case FILE_LINK_INFORMATION:
+       {
+               if (buf_len < sizeof(struct smb2_file_link_info))
+                       return -EINVAL;
+
                return smb2_create_link(work, work->tcon->share_conf,
-                                       (struct smb2_file_link_info *)buf, fp->filp,
+                                       (struct smb2_file_link_info *)req->Buffer,
+                                       buf_len, fp->filp,
                                        work->sess->conn->local_nls);
-
+       }
        case FILE_DISPOSITION_INFORMATION:
+       {
                if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
                        ksmbd_debug(SMB,
                                    "User does not have write permission\n");
                        return -EACCES;
                }
-               return set_file_disposition_info(fp, buf);
 
+               if (buf_len < sizeof(struct smb2_file_disposition_info))
+                       return -EINVAL;
+
+               return set_file_disposition_info(fp,
+                                                (struct smb2_file_disposition_info *)req->Buffer);
+       }
        case FILE_FULL_EA_INFORMATION:
        {
                if (!(fp->daccess & FILE_WRITE_EA_LE)) {
@@ -5744,18 +5897,29 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
                        return -EACCES;
                }
 
-               return smb2_set_ea((struct smb2_ea_info *)buf,
-                                  &fp->filp->f_path);
-       }
+               if (buf_len < sizeof(struct smb2_ea_info))
+                       return -EINVAL;
 
+               return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+                                  buf_len, &fp->filp->f_path);
+       }
        case FILE_POSITION_INFORMATION:
-               return set_file_position_info(fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_pos_info))
+                       return -EINVAL;
 
+               return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+       }
        case FILE_MODE_INFORMATION:
-               return set_file_mode_info(fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_mode_info))
+                       return -EINVAL;
+
+               return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+       }
        }
 
-       pr_err("Unimplemented Fileinfoclass :%d\n", info_class);
+       pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);
        return -EOPNOTSUPP;
 }
 
@@ -5816,8 +5980,7 @@ int smb2_set_info(struct ksmbd_work *work)
        switch (req->InfoType) {
        case SMB2_O_INFO_FILE:
                ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
-               rc = smb2_set_info_file(work, fp, req->FileInfoClass,
-                                       req->Buffer, work->tcon->share_conf);
+               rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
                break;
        case SMB2_O_INFO_SECURITY:
                ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
@@ -6106,8 +6269,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
            (offsetof(struct smb2_write_req, Buffer) - 4)) {
                data_buf = (char *)&req->Buffer[0];
        } else {
-               if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-                   (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+               if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
                        pr_err("invalid write data offset %u, smb_len %u\n",
                               le16_to_cpu(req->DataOffset),
                               get_rfc1002_len(req));
@@ -6265,8 +6427,7 @@ int smb2_write(struct ksmbd_work *work)
                    (offsetof(struct smb2_write_req, Buffer) - 4)) {
                        data_buf = (char *)&req->Buffer[0];
                } else {
-                       if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-                           (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+                       if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
                                pr_err("invalid write data offset %u, smb_len %u\n",
                                       le16_to_cpu(req->DataOffset),
                                       get_rfc1002_len(req));
@@ -6909,24 +7070,26 @@ out2:
        return err;
 }
 
-static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
+static int fsctl_copychunk(struct ksmbd_work *work,
+                          struct copychunk_ioctl_req *ci_req,
+                          unsigned int cnt_code,
+                          unsigned int input_count,
+                          unsigned long long volatile_id,
+                          unsigned long long persistent_id,
                           struct smb2_ioctl_rsp *rsp)
 {
-       struct copychunk_ioctl_req *ci_req;
        struct copychunk_ioctl_rsp *ci_rsp;
        struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
        struct srv_copychunk *chunks;
        unsigned int i, chunk_count, chunk_count_written = 0;
        unsigned int chunk_size_written = 0;
        loff_t total_size_written = 0;
-       int ret, cnt_code;
+       int ret = 0;
 
-       cnt_code = le32_to_cpu(req->CntCode);
-       ci_req = (struct copychunk_ioctl_req *)&req->Buffer[0];
        ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
 
-       rsp->VolatileFileId = req->VolatileFileId;
-       rsp->PersistentFileId = req->PersistentFileId;
+       rsp->VolatileFileId = cpu_to_le64(volatile_id);
+       rsp->PersistentFileId = cpu_to_le64(persistent_id);
        ci_rsp->ChunksWritten =
                cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
        ci_rsp->ChunkBytesWritten =
@@ -6936,12 +7099,13 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
        chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
        chunk_count = le32_to_cpu(ci_req->ChunkCount);
+       if (chunk_count == 0)
+               goto out;
        total_size_written = 0;
 
        /* verify the SRV_COPYCHUNK_COPY packet */
        if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
-           le32_to_cpu(req->InputCount) <
-            offsetof(struct copychunk_ioctl_req, Chunks) +
+           input_count < offsetof(struct copychunk_ioctl_req, Chunks) +
             chunk_count * sizeof(struct srv_copychunk)) {
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                return -EINVAL;
@@ -6962,9 +7126,7 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
        src_fp = ksmbd_lookup_foreign_fd(work,
                                         le64_to_cpu(ci_req->ResumeKey[0]));
-       dst_fp = ksmbd_lookup_fd_slow(work,
-                                     le64_to_cpu(req->VolatileFileId),
-                                     le64_to_cpu(req->PersistentFileId));
+       dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
        ret = -EINVAL;
        if (!src_fp ||
            src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
@@ -7039,8 +7201,8 @@ static __be32 idev_ipv4_address(struct in_device *idev)
 }
 
 static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
-                                       struct smb2_ioctl_req *req,
-                                       struct smb2_ioctl_rsp *rsp)
+                                       struct smb2_ioctl_rsp *rsp,
+                                       unsigned int out_buf_len)
 {
        struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
        int nbytes = 0;
@@ -7052,6 +7214,12 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
        rtnl_lock();
        for_each_netdev(&init_net, netdev) {
+               if (out_buf_len <
+                   nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
+                       rtnl_unlock();
+                       return -ENOSPC;
+               }
+
                if (netdev->type == ARPHRD_LOOPBACK)
                        continue;
 
@@ -7131,11 +7299,6 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
        if (nii_rsp)
                nii_rsp->Next = 0;
 
-       if (!nbytes) {
-               rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
-               return -EINVAL;
-       }
-
        rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
        rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
        return nbytes;
@@ -7143,11 +7306,16 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
 static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
                                         struct validate_negotiate_info_req *neg_req,
-                                        struct validate_negotiate_info_rsp *neg_rsp)
+                                        struct validate_negotiate_info_rsp *neg_rsp,
+                                        unsigned int in_buf_len)
 {
        int ret = 0;
        int dialect;
 
+       if (in_buf_len < sizeof(struct validate_negotiate_info_req) +
+                       le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
+               return -EINVAL;
+
        dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
                                             neg_req->DialectCount);
        if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
@@ -7181,7 +7349,7 @@ err_out:
 static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
                                        struct file_allocated_range_buffer *qar_req,
                                        struct file_allocated_range_buffer *qar_rsp,
-                                       int in_count, int *out_count)
+                                       unsigned int in_count, unsigned int *out_count)
 {
        struct ksmbd_file *fp;
        loff_t start, length;
@@ -7208,7 +7376,8 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
 }
 
 static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
-                                int out_buf_len, struct smb2_ioctl_req *req,
+                                unsigned int out_buf_len,
+                                struct smb2_ioctl_req *req,
                                 struct smb2_ioctl_rsp *rsp)
 {
        struct ksmbd_rpc_command *rpc_resp;
@@ -7322,8 +7491,7 @@ int smb2_ioctl(struct ksmbd_work *work)
 {
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp, *rsp_org;
-       int cnt_code, nbytes = 0;
-       int out_buf_len;
+       unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
        u64 id = KSMBD_NO_FID;
        struct ksmbd_conn *conn = work->conn;
        int ret = 0;
@@ -7351,8 +7519,14 @@ int smb2_ioctl(struct ksmbd_work *work)
        }
 
        cnt_code = le32_to_cpu(req->CntCode);
-       out_buf_len = le32_to_cpu(req->MaxOutputResponse);
-       out_buf_len = min(KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
+       ret = smb2_calc_max_out_buf_len(work, 48,
+                                       le32_to_cpu(req->MaxOutputResponse));
+       if (ret < 0) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               goto out;
+       }
+       out_buf_len = (unsigned int)ret;
+       in_buf_len = le32_to_cpu(req->InputCount);
 
        switch (cnt_code) {
        case FSCTL_DFS_GET_REFERRALS:
@@ -7380,6 +7554,7 @@ int smb2_ioctl(struct ksmbd_work *work)
                break;
        }
        case FSCTL_PIPE_TRANSCEIVE:
+               out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
                nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
                break;
        case FSCTL_VALIDATE_NEGOTIATE_INFO:
@@ -7388,9 +7563,16 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct validate_negotiate_info_req))
+                       return -EINVAL;
+
+               if (out_buf_len < sizeof(struct validate_negotiate_info_rsp))
+                       return -EINVAL;
+
                ret = fsctl_validate_negotiate_info(conn,
                        (struct validate_negotiate_info_req *)&req->Buffer[0],
-                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0]);
+                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+                       in_buf_len);
                if (ret < 0)
                        goto out;
 
@@ -7399,9 +7581,10 @@ int smb2_ioctl(struct ksmbd_work *work)
                rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
                break;
        case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
-               nbytes = fsctl_query_iface_info_ioctl(conn, req, rsp);
-               if (nbytes < 0)
+               ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
+               if (ret < 0)
                        goto out;
+               nbytes = ret;
                break;
        case FSCTL_REQUEST_RESUME_KEY:
                if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
@@ -7426,15 +7609,33 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
                        ret = -EINVAL;
                        goto out;
                }
 
                nbytes = sizeof(struct copychunk_ioctl_rsp);
-               fsctl_copychunk(work, req, rsp);
+               rsp->VolatileFileId = req->VolatileFileId;
+               rsp->PersistentFileId = req->PersistentFileId;
+               fsctl_copychunk(work,
+                               (struct copychunk_ioctl_req *)&req->Buffer[0],
+                               le32_to_cpu(req->CntCode),
+                               le32_to_cpu(req->InputCount),
+                               le64_to_cpu(req->VolatileFileId),
+                               le64_to_cpu(req->PersistentFileId),
+                               rsp);
                break;
        case FSCTL_SET_SPARSE:
+               if (in_buf_len < sizeof(struct file_sparse)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                ret = fsctl_set_sparse(work, id,
                                       (struct file_sparse *)&req->Buffer[0]);
                if (ret < 0)
@@ -7453,6 +7654,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct file_zero_data_information)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                zero_data =
                        (struct file_zero_data_information *)&req->Buffer[0];
 
@@ -7472,6 +7678,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                break;
        }
        case FSCTL_QUERY_ALLOCATED_RANGES:
+               if (in_buf_len < sizeof(struct file_allocated_range_buffer)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                ret = fsctl_query_allocated_ranges(work, id,
                        (struct file_allocated_range_buffer *)&req->Buffer[0],
                        (struct file_allocated_range_buffer *)&rsp->Buffer[0],
@@ -7512,6 +7723,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                struct duplicate_extents_to_file *dup_ext;
                loff_t src_off, dst_off, length, cloned;
 
+               if (in_buf_len < sizeof(struct duplicate_extents_to_file)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
 
                fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
@@ -7582,6 +7798,8 @@ out:
                rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
        else if (ret == -EOPNOTSUPP)
                rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+       else if (ret == -ENOSPC)
+               rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
        else if (ret < 0 || rsp->hdr.Status == 0)
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
        smb2_set_err_rsp(work);
@@ -8171,7 +8389,8 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
 
        WORK_BUFFERS(work, req, rsp);
 
-       if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)
+       if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
+           conn->preauth_info)
                ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
                                                 conn->preauth_info->Preauth_HashValue);
 
@@ -8275,31 +8494,29 @@ int smb3_decrypt_req(struct ksmbd_work *work)
        struct smb2_hdr *hdr;
        unsigned int pdu_length = get_rfc1002_len(buf);
        struct kvec iov[2];
-       unsigned int buf_data_size = pdu_length + 4 -
+       int buf_data_size = pdu_length + 4 -
                sizeof(struct smb2_transform_hdr);
        struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
-       unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
        int rc = 0;
 
-       sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
-       if (!sess) {
-               pr_err("invalid session id(%llx) in transform header\n",
-                      le64_to_cpu(tr_hdr->SessionId));
-               return -ECONNABORTED;
-       }
-
-       if (pdu_length + 4 <
-           sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+       if (buf_data_size < sizeof(struct smb2_hdr)) {
                pr_err("Transform message is too small (%u)\n",
                       pdu_length);
                return -ECONNABORTED;
        }
 
-       if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {
+       if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
                pr_err("Transform message is broken\n");
                return -ECONNABORTED;
        }
 
+       sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
+       if (!sess) {
+               pr_err("invalid session id(%llx) in transform header\n",
+                      le64_to_cpu(tr_hdr->SessionId));
+               return -ECONNABORTED;
+       }
+
        iov[0].iov_base = buf;
        iov[0].iov_len = sizeof(struct smb2_transform_hdr);
        iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
index bcec845b03f344cec4838591143c3b0e6dbbf159..ff5a2f01d34ae561ba4d20a0cfaee476f0e8e9a1 100644 (file)
 #define SMB21_DEFAULT_IOSIZE   (1024 * 1024)
 #define SMB3_DEFAULT_IOSIZE    (4 * 1024 * 1024)
 #define SMB3_DEFAULT_TRANS_SIZE        (1024 * 1024)
+#define SMB3_MIN_IOSIZE        (64 * 1024)
+#define SMB3_MAX_IOSIZE        (8 * 1024 * 1024)
 
 /*
  * SMB2 Header Definition
@@ -1464,6 +1466,15 @@ struct smb2_file_all_info { /* data block encoding of response to level 18 */
        char   FileName[1];
 } __packed; /* level 18 Query */
 
+struct smb2_file_basic_info { /* data block encoding of response to level 18 */
+       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le32 Attributes;
+       __u32  Pad1;            /* End of FILE_BASIC_INFO_INFO equivalent */
+} __packed;
+
 struct smb2_file_alt_name_info {
        __le32 FileNameLength;
        char FileName[0];
@@ -1628,7 +1639,6 @@ struct smb2_posix_info {
 } __packed;
 
 /* functions */
-int init_smb2_0_server(struct ksmbd_conn *conn);
 void init_smb2_1_server(struct ksmbd_conn *conn);
 void init_smb3_0_server(struct ksmbd_conn *conn);
 void init_smb3_02_server(struct ksmbd_conn *conn);
index 40f4fafa2e112ae2ad7911221975ef495281e3ac..707490ab1f4c42c29b7b47bda99288c866bab943 100644 (file)
@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
 #define MAGIC_CHAR '~'
 #define PERIOD '.'
 #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
-#define KSMBD_MIN_SUPPORTED_HEADER_SIZE        (sizeof(struct smb2_hdr))
 
 struct smb_protocol {
        int             index;
@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)
 
 inline int ksmbd_min_protocol(void)
 {
-       return SMB2_PROT;
+       return SMB21_PROT;
 }
 
 inline int ksmbd_max_protocol(void)
@@ -155,20 +154,7 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
  */
 bool ksmbd_smb_request(struct ksmbd_conn *conn)
 {
-       int type = *(char *)conn->request_buf;
-
-       switch (type) {
-       case RFC1002_SESSION_MESSAGE:
-               /* Regular SMB request */
-               return true;
-       case RFC1002_SESSION_KEEP_ALIVE:
-               ksmbd_debug(SMB, "RFC 1002 session keep alive\n");
-               break;
-       default:
-               ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);
-       }
-
-       return false;
+       return conn->request_buf[0] == 0;
 }
 
 static bool supported_protocol(int idx)
@@ -182,10 +168,12 @@ static bool supported_protocol(int idx)
                idx <= server_conf.max_protocol);
 }
 
-static char *next_dialect(char *dialect, int *next_off)
+static char *next_dialect(char *dialect, int *next_off, int bcount)
 {
        dialect = dialect + *next_off;
-       *next_off = strlen(dialect);
+       *next_off = strnlen(dialect, bcount);
+       if (dialect[*next_off] != '\0')
+               return NULL;
        return dialect;
 }
 
@@ -200,7 +188,9 @@ static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
                dialect = cli_dialects;
                bcount = le16_to_cpu(byte_count);
                do {
-                       dialect = next_dialect(dialect, &next);
+                       dialect = next_dialect(dialect, &next, bcount);
+                       if (!dialect)
+                               break;
                        ksmbd_debug(SMB, "client requested dialect %s\n",
                                    dialect);
                        if (!strcmp(dialect, smb1_protos[i].name)) {
@@ -248,13 +238,22 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
 
 static int ksmbd_negotiate_smb_dialect(void *buf)
 {
-       __le32 proto;
+       int smb_buf_length = get_rfc1002_len(buf);
+       __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
 
-       proto = ((struct smb2_hdr *)buf)->ProtocolId;
        if (proto == SMB2_PROTO_NUMBER) {
                struct smb2_negotiate_req *req;
+               int smb2_neg_size =
+                       offsetof(struct smb2_negotiate_req, Dialects) - 4;
 
                req = (struct smb2_negotiate_req *)buf;
+               if (smb2_neg_size > smb_buf_length)
+                       goto err_out;
+
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   smb_buf_length)
+                       goto err_out;
+
                return ksmbd_lookup_dialect_by_id(req->Dialects,
                                                  req->DialectCount);
        }
@@ -264,10 +263,19 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
                struct smb_negotiate_req *req;
 
                req = (struct smb_negotiate_req *)buf;
+               if (le16_to_cpu(req->ByteCount) < 2)
+                       goto err_out;
+
+               if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
+                       le16_to_cpu(req->ByteCount) > smb_buf_length) {
+                       goto err_out;
+               }
+
                return ksmbd_lookup_dialect_by_name(req->DialectsArray,
                                                    req->ByteCount);
        }
 
+err_out:
        return BAD_PROT_ID;
 }
 
@@ -285,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
        return 0;
 }
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu)
-{
-       return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
-}
-
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                                      struct ksmbd_file *dir,
                                      struct ksmbd_dir_info *d_info,
@@ -424,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
 
 static int __smb2_negotiate(struct ksmbd_conn *conn)
 {
-       return (conn->dialect >= SMB20_PROT_ID &&
+       return (conn->dialect >= SMB21_PROT_ID &&
                conn->dialect <= SMB311_PROT_ID);
 }
 
@@ -454,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
                }
        }
 
-       if (command == SMB2_NEGOTIATE_HE) {
+       if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
                ret = smb2_handle_negotiate(work);
                init_smb2_neg_rsp(work);
                return ret;
index 0a6af447cc4515711fad2abd476479d586ba15d6..6e79e7577f6b7b1d31837101e8e9bdd0dbe38933 100644 (file)
 #define CIFS_DEFAULT_IOSIZE    (64 * 1024)
 #define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
 
-/* RFC 1002 session packet types */
-#define RFC1002_SESSION_MESSAGE                        0x00
-#define RFC1002_SESSION_REQUEST                        0x81
-#define RFC1002_POSITIVE_SESSION_RESPONSE      0x82
-#define RFC1002_NEGATIVE_SESSION_RESPONSE      0x83
-#define RFC1002_RETARGET_SESSION_RESPONSE      0x84
-#define RFC1002_SESSION_KEEP_ALIVE             0x85
+#define MAX_STREAM_PROT_LEN    0x00FFFFFF
 
 /* Responses when opening a file. */
 #define F_SUPERSEDED   0
@@ -501,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
 
 int ksmbd_init_smb_server(struct ksmbd_work *work);
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu);
-
 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
                                      int info_level,
index 0a95cdec8c800d1f437d2e3c1555ac432ec7af42..bd792db326239410982461467eef79d33886cd2a 100644 (file)
@@ -380,7 +380,7 @@ static void parse_dacl(struct user_namespace *user_ns,
 {
        int i, ret;
        int num_aces = 0;
-       int acl_size;
+       unsigned int acl_size;
        char *acl_base;
        struct smb_ace **ppace;
        struct posix_acl_entry *cf_pace, *cf_pdace;
@@ -392,7 +392,7 @@ static void parse_dacl(struct user_namespace *user_ns,
                return;
 
        /* validate that we do not go past end of acl */
-       if (end_of_acl <= (char *)pdacl ||
+       if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
            end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
                pr_err("ACL too small to parse DACL\n");
                return;
@@ -431,8 +431,22 @@ static void parse_dacl(struct user_namespace *user_ns,
         * user/group/other have no permissions
         */
        for (i = 0; i < num_aces; ++i) {
+               if (end_of_acl - acl_base < acl_size)
+                       break;
+
                ppace[i] = (struct smb_ace *)(acl_base + acl_size);
                acl_base = (char *)ppace[i];
+               acl_size = offsetof(struct smb_ace, sid) +
+                       offsetof(struct smb_sid, sub_auth);
+
+               if (end_of_acl - acl_base < acl_size ||
+                   ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+                   (end_of_acl - acl_base <
+                    acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+                   (le16_to_cpu(ppace[i]->size) <
+                    acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+                       break;
+
                acl_size = le16_to_cpu(ppace[i]->size);
                ppace[i]->access_req =
                        smb_map_generic_desired_access(ppace[i]->access_req);
@@ -807,6 +821,9 @@ int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
        if (!pntsd)
                return -EIO;
 
+       if (acl_len < sizeof(struct smb_ntsd))
+               return -EINVAL;
+
        owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
                        le32_to_cpu(pntsd->osidoffset));
        group_sid_ptr = (struct smb_sid *)((char *)pntsd +
index 44aea33a67fa746a298fa045e8036f929814fa68..1acf1892a466c10acefcb6ee9700e79268abf7fa 100644 (file)
@@ -601,7 +601,7 @@ int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
        return ret;
 }
 
-int ksmbd_ipc_logout_request(const char *account)
+int ksmbd_ipc_logout_request(const char *account, int flags)
 {
        struct ksmbd_ipc_msg *msg;
        struct ksmbd_logout_request *req;
@@ -616,6 +616,7 @@ int ksmbd_ipc_logout_request(const char *account)
 
        msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
        req = (struct ksmbd_logout_request *)msg->payload;
+       req->account_flags = flags;
        strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
 
        ret = ipc_msg_send(msg);
index 9eacc895ffdbac44b993340cf79d902d782086d9..5e5b90a0c1879c3656b5e91a84210f296a99b6d5 100644 (file)
@@ -25,7 +25,7 @@ ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
                               struct sockaddr *peer_addr);
 int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
                                      unsigned long long connect_id);
-int ksmbd_ipc_logout_request(const char *account);
+int ksmbd_ipc_logout_request(const char *account, int flags);
 struct ksmbd_share_config_response *
 ksmbd_ipc_share_config_request(const char *name);
 struct ksmbd_spnego_authen_response *
index 3a7fa23ba8508c952eb498154adbb9ca8eaa0ee0..a2fd5a4d4cd5e4924dfb376ee16ebcb8bf4c4e04 100644 (file)
@@ -549,6 +549,10 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        switch (recvmsg->type) {
        case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+               if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+                       put_empty_recvmsg(t, recvmsg);
+                       return;
+               }
                t->negotiation_requested = true;
                t->full_packet_received = true;
                wake_up_interruptible(&t->wait_status);
@@ -556,10 +560,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
        case SMB_DIRECT_MSG_DATA_TRANSFER: {
                struct smb_direct_data_transfer *data_transfer =
                        (struct smb_direct_data_transfer *)recvmsg->packet;
-               int data_length = le32_to_cpu(data_transfer->data_length);
+               unsigned int data_length;
                int avail_recvmsg_count, receive_credits;
 
+               if (wc->byte_len <
+                   offsetof(struct smb_direct_data_transfer, padding)) {
+                       put_empty_recvmsg(t, recvmsg);
+                       return;
+               }
+
+               data_length = le32_to_cpu(data_transfer->data_length);
                if (data_length) {
+                       if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+                           (u64)data_length) {
+                               put_empty_recvmsg(t, recvmsg);
+                               return;
+                       }
+
                        if (t->full_packet_received)
                                recvmsg->first_segment = true;
 
@@ -568,7 +585,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
                        else
                                t->full_packet_received = true;
 
-                       enqueue_reassembly(t, recvmsg, data_length);
+                       enqueue_reassembly(t, recvmsg, (int)data_length);
                        wake_up_interruptible(&t->wait_reassembly_queue);
 
                        spin_lock(&t->receive_credit_lock);
index dc15a5ecd2e028daa56bc6272021d454ac111ce5..c14320e03b698629ecb3199e37ded26b284ed37c 100644 (file)
@@ -215,7 +215,7 @@ out_error:
  * ksmbd_kthread_fn() - listen to new SMB connections and callback server
  * @p:         arguments to forker thread
  *
- * Return:     Returns a task_struct or ERR_PTR
+ * Return:     0 on success, error number otherwise
  */
 static int ksmbd_kthread_fn(void *p)
 {
@@ -387,7 +387,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
 /**
  * create_socket - create socket for ksmbd/0
  *
- * Return:     Returns a task_struct or ERR_PTR
+ * Return:     0 on success, error number otherwise
  */
 static int create_socket(struct interface *iface)
 {
index b41954294d38081b0e32a0b62bd0dafb6c1273ff..835b384b089593ee5c77cf9afe35871996074a3e 100644 (file)
@@ -1023,7 +1023,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
                         struct file_allocated_range_buffer *ranges,
-                        int in_count, int *out_count)
+                        unsigned int in_count, unsigned int *out_count)
 {
        struct file *f = fp->filp;
        struct inode *inode = file_inode(fp->filp);
index 7b1dcaa3fbdc3121ac42bb967f4b13b1794ca05b..b0d5b8feb4a36ba26868dc5da84728e8b2f0049e 100644 (file)
@@ -166,7 +166,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 struct file_allocated_range_buffer;
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
                         struct file_allocated_range_buffer *ranges,
-                        int in_count, int *out_count);
+                        unsigned int in_count, unsigned int *out_count);
 int ksmbd_vfs_unlink(struct user_namespace *user_ns,
                     struct dentry *dir, struct dentry *dentry);
 void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
index 0b6cd3b8734c6e1643baad8c5bd668cd161c6988..994ec22d40402b7a7c3c5206e434d114ca81298a 100644 (file)
@@ -150,7 +150,7 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
 {
        struct iov_iter iter;
 
-       iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
+       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len   - subreq->transferred);
        iov_iter_zero(iov_iter_count(&iter), &iter);
index edec458315854631e59ae0f0a4a9070b3ef6d4be..0a9b72685f984857f3e7a04651f8c5ca2e2b631f 100644 (file)
@@ -42,7 +42,6 @@ EXPORT_SYMBOL_GPL(locks_start_grace);
 
 /**
  * locks_end_grace
- * @net: net namespace that this lock manager belongs to
  * @lm: who this grace period is for
  *
  * Call this function to state that the given lock manager is ready to
index 7629248fdd532885d35423968c396dbbe9d56a6a..be3c1aad50ea3974433f39b3e6f362f2f0839155 100644 (file)
@@ -542,7 +542,7 @@ nfsd_file_close_inode_sync(struct inode *inode)
 }
 
 /**
- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+ * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
  * @inode: inode of the file to attempt to remove
  *
  * Walk the whole hash bucket, looking for any files that correspond to "inode".
index 7abeccb975b22703cb40ab36b310da1657d8bc34..cf030ebe28275f04ee5bb7809bc55af193778761 100644 (file)
@@ -3544,15 +3544,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
                goto fail;
        cd->rd_maxcount -= entry_bytes;
        /*
-        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
-        * let's always let through the first entry, at least:
+        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
+        * notes that it could be zero. If it is zero, then the server
+        * should enforce only the rd_maxcount value.
         */
-       if (!cd->rd_dircount)
-               goto fail;
-       name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
-       if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
-               goto fail;
-       cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+       if (cd->rd_dircount) {
+               name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
+               if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+                       goto fail;
+               cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+               if (!cd->rd_dircount)
+                       cd->rd_maxcount = 0;
+       }
 
        cd->cookie_offset = cookie_offset;
 skip_entry:
index c2c3d9077dc5813fbd63ee1df4bd154d7a376813..070e5dd03e26f3b92049edd7d1a7b98683fba64d 100644 (file)
@@ -793,7 +793,10 @@ out_close:
                svc_xprt_put(xprt);
        }
 out_err:
-       nfsd_destroy(net);
+       if (!list_empty(&nn->nfsd_serv->sv_permsocks))
+               nn->nfsd_serv->sv_nrthreads--;
+        else
+               nfsd_destroy(net);
        return err;
 }
 
@@ -1545,7 +1548,7 @@ static int __init init_nfsd(void)
                goto out_free_all;
        return 0;
 out_free_all:
-       unregister_pernet_subsys(&nfsd_net_ops);
+       unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
index 34c4cbf7e29bc49bdb3fa060fd2c235c3fb68536..e8c00dda42adbbff372908fdcb3d7d6814567cdf 100644 (file)
@@ -6,13 +6,9 @@
  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
-#include <linux/ratelimit.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -291,7 +287,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
                if (!rsize) {
                        /* Empty resident -> Non empty nonresident. */
                } else if (!is_data) {
-                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
+                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
                        if (err)
                                goto out2;
                } else if (!page) {
@@ -451,11 +447,8 @@ again:
 again_1:
        align = sbi->cluster_size;
 
-       if (is_ext) {
+       if (is_ext)
                align <<= attr_b->nres.c_unit;
-               if (is_attr_sparsed(attr_b))
-                       keep_prealloc = false;
-       }
 
        old_valid = le64_to_cpu(attr_b->nres.valid_size);
        old_size = le64_to_cpu(attr_b->nres.data_size);
@@ -465,9 +458,6 @@ again_1:
        new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
        new_alen = new_alloc >> cluster_bits;
 
-       if (keep_prealloc && is_ext)
-               keep_prealloc = false;
-
        if (keep_prealloc && new_size < old_size) {
                attr_b->nres.data_size = cpu_to_le64(new_size);
                mi_b->dirty = true;
@@ -529,7 +519,7 @@ add_alloc_in_same_attr_seg:
                } else if (pre_alloc == -1) {
                        pre_alloc = 0;
                        if (type == ATTR_DATA && !name_len &&
-                           sbi->options.prealloc) {
+                           sbi->options->prealloc) {
                                CLST new_alen2 = bytes_to_cluster(
                                        sbi, get_pre_allocated(new_size));
                                pre_alloc = new_alen2 - new_alen;
@@ -1966,7 +1956,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
                        return 0;
 
                from = vbo;
-               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
+               to = min_t(u64, vbo + bytes, data_size);
                memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
                return 0;
        }
index fa32399eb5171ce25f037fbd1bd8e483a37b839c..bad6d8a849a24b3a4a9fc2c4c372496d6ac1b54a 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -336,7 +333,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
 
        if (attr && attr->non_res) {
                err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
-                                       al->size);
+                                       al->size, 0);
                if (err)
                        return err;
                al->dirty = false;
@@ -423,7 +420,7 @@ next:
        return true;
 }
 
-int al_update(struct ntfs_inode *ni)
+int al_update(struct ntfs_inode *ni, int sync)
 {
        int err;
        struct ATTRIB *attr;
@@ -445,7 +442,7 @@ int al_update(struct ntfs_inode *ni)
                memcpy(resident_data(attr), al->le, al->size);
        } else {
                err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
-                                       al->size);
+                                       al->size, sync);
                if (err)
                        goto out;
 
index ce304d40b5e1642cc84d48143e2aa961ac3ea735..50d838093790a1f07474ea201510dde42d2431f3 100644 (file)
@@ -5,13 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/types.h>
 
-#include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 #define BITS_IN_SIZE_T (sizeof(size_t) * 8)
@@ -124,8 +119,7 @@ bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
 
        pos = nbits & 7;
        if (pos) {
-               u8 mask = fill_mask[pos];
-
+               mask = fill_mask[pos];
                if ((*map & mask) != mask)
                        return false;
        }
index 831501555009533d8b3eac04b6258e7072bd6304..aa184407520f0263844839e03d2f1edde3c6628f 100644 (file)
  *
  */
 
-#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
-#include "debug.h"
 #include "ntfs.h"
 #include "ntfs_fs.h"
 
@@ -435,7 +433,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
                ;
        } else {
                n3 = rb_next(&e->count.node);
-               max_new_len = len > new_len ? len : new_len;
+               max_new_len = max(len, new_len);
                if (!n3) {
                        wnd->extent_max = max_new_len;
                } else {
@@ -731,7 +729,7 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                bh = wnd_map(wnd, iw);
                if (IS_ERR(bh)) {
@@ -784,7 +782,7 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                bh = wnd_map(wnd, iw);
                if (IS_ERR(bh)) {
@@ -834,7 +832,7 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                if (wbits != wnd->free_bits[iw]) {
                        bool ret;
@@ -926,7 +924,7 @@ use_wnd:
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                if (wnd->free_bits[iw]) {
                        bool ret;
index 31120569a87b9b07cf91f8e0b8c7291db059f4f3..53ef7489c75fd70f239d4e50af9a5e2f097c6bde 100644 (file)
@@ -11,6 +11,9 @@
 #ifndef _LINUX_NTFS3_DEBUG_H
 #define _LINUX_NTFS3_DEBUG_H
 
+struct super_block;
+struct inode;
+
 #ifndef Add2Ptr
 #define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
 #define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
index 93f6d485564e0103e56ad417f184f1ace0926eea..fb438d6040409838389ba7454645034ab2fa7a08 100644 (file)
@@ -7,10 +7,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
 #include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs_fs.h"
 
 /* Convert little endian UTF-16 to NLS string. */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
                      u8 *buf, int buf_len)
 {
-       int ret, uni_len, warn;
-       const __le16 *ip;
+       int ret, warn;
        u8 *op;
-       struct nls_table *nls = sbi->options.nls;
+       struct nls_table *nls = sbi->options->nls;
 
        static_assert(sizeof(wchar_t) == sizeof(__le16));
 
        if (!nls) {
                /* UTF-16 -> UTF-8 */
-               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
-                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
+               ret = utf16s_to_utf8s(name, len, UTF16_LITTLE_ENDIAN, buf,
+                                     buf_len);
                buf[ret] = '\0';
                return ret;
        }
 
-       ip = uni->name;
        op = buf;
-       uni_len = uni->len;
        warn = 0;
 
-       while (uni_len--) {
+       while (len--) {
                u16 ec;
                int charlen;
                char dump[5];
@@ -52,7 +46,7 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
                        break;
                }
 
-               ec = le16_to_cpu(*ip++);
+               ec = le16_to_cpu(*name++);
                charlen = nls->uni2char(ec, op, buf_len);
 
                if (charlen > 0) {
@@ -186,7 +180,7 @@ int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
 {
        int ret, slen;
        const u8 *end;
-       struct nls_table *nls = sbi->options.nls;
+       struct nls_table *nls = sbi->options->nls;
        u16 *uname = uni->name;
 
        static_assert(sizeof(wchar_t) == sizeof(u16));
@@ -301,14 +295,14 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
                return 0;
 
        /* Skip meta files. Unless option to show metafiles is set. */
-       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
+       if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
                return 0;
 
-       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+       if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
                return 0;
 
-       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
-                                    name, PATH_MAX);
+       name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+                                    PATH_MAX);
        if (name_len <= 0) {
                ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
                          ino);
index 424450e77ad52ae550e8de444dc393411d97cde6..43b1451bff539576cd03c0f624708bf65b959270 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/compat.h>
 #include <linux/falloc.h>
 #include <linux/fiemap.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -588,8 +587,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                truncate_pagecache(inode, vbo_down);
 
                if (!is_sparsed(ni) && !is_compressed(ni)) {
-                       /* Normal file. */
-                       err = ntfs_zero_range(inode, vbo, end);
+                       /*
+                        * Normal file, can't make hole.
+                        * TODO: Try to find way to save info about hole.
+                        */
+                       err = -EOPNOTSUPP;
                        goto out;
                }
 
@@ -737,7 +739,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        umode_t mode = inode->i_mode;
        int err;
 
-       if (sbi->options.no_acs_rules) {
+       if (sbi->options->noacsrules) {
                /* "No access rules" - Force any changes of time etc. */
                attr->ia_valid |= ATTR_FORCE;
                /* and disable for editing some attributes. */
@@ -1185,7 +1187,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
        int err = 0;
 
        /* If we are last writer on the inode, drop the block reservation. */
-       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
+       if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
                                      atomic_read(&inode->i_writecount) == 1)) {
                ni_lock(ni);
                down_write(&ni->file.run_lock);
index 938b12d56ca676308d2af82666a4f6f19cc8f122..6f47a9c17f896c62e355db7a6948075a88247378 100644 (file)
@@ -5,11 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fiemap.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 #include <linux/vmalloc.h>
 
 #include "debug.h"
@@ -708,18 +705,35 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
                        continue;
 
                mi = ni_find_mi(ni, ino_get(&le->ref));
+               if (!mi) {
+                       /* Should never happen, because this was already checked. */
+                       goto bad;
+               }
 
                attr = mi_find_attr(mi, NULL, le->type, le_name(le),
                                    le->name_len, &le->id);
+               if (!attr) {
+                       /* Should never happen, because this was already checked. */
+                       goto bad;
+               }
                asize = le32_to_cpu(attr->size);
 
                /* Insert into primary record. */
                attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
                                          le->name_len, asize,
                                          le16_to_cpu(attr->name_off));
-               id = attr_ins->id;
+               if (!attr_ins) {
+                       /*
+                        * Internal error.
+                        * Either there is no space in the primary record
+                        * (already checked), or we tried to insert another
+                        * non-indexed attribute (logic error).
+                        */
+                       goto bad;
+               }
 
                /* Copy all except id. */
+               id = attr_ins->id;
                memcpy(attr_ins, attr, asize);
                attr_ins->id = id;
 
@@ -735,6 +749,10 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
        ni->attr_list.dirty = false;
 
        return 0;
+bad:
+       ntfs_inode_err(&ni->vfs_inode, "Internal error");
+       make_bad_inode(&ni->vfs_inode);
+       return -EINVAL;
 }
 
 /*
@@ -956,6 +974,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
                        continue;
                }
 
+               /*
+                * Do not try to insert this attribute
+                * if there is no room in record.
+                */
+               if (le32_to_cpu(mi->mrec->used) + asize > sbi->record_size)
+                       continue;
+
                /* Try to insert attribute into this subrecord. */
                attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
                                       name_off, svcn, ins_le);
@@ -1451,7 +1476,7 @@ int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
                attr->res.flags = RESIDENT_FLAG_INDEXED;
 
                /* is_attr_indexed(attr)) == true */
-               le16_add_cpu(&ni->mi.mrec->hard_links, +1);
+               le16_add_cpu(&ni->mi.mrec->hard_links, 1);
                ni->mi.dirty = true;
        }
        attr->res.res = 0;
@@ -1606,7 +1631,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
 
        *le = NULL;
 
-       if (FILE_NAME_POSIX == name_type)
+       if (name_type == FILE_NAME_POSIX)
                return NULL;
 
        /* Enumerate all names. */
@@ -1706,18 +1731,16 @@ out:
 /*
  * ni_parse_reparse
  *
- * Buffer is at least 24 bytes.
+ * buffer - memory for reparse buffer header
  */
 enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
-                                  void *buffer)
+                                  struct REPARSE_DATA_BUFFER *buffer)
 {
        const struct REPARSE_DATA_BUFFER *rp = NULL;
        u8 bits;
        u16 len;
        typeof(rp->CompressReparseBuffer) *cmpr;
 
-       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
-
        /* Try to estimate reparse point. */
        if (!attr->non_res) {
                rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
@@ -1803,6 +1826,9 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
                return REPARSE_NONE;
        }
 
+       if (buffer != rp)
+               memcpy(buffer, rp, sizeof(struct REPARSE_DATA_BUFFER));
+
        /* Looks like normal symlink. */
        return REPARSE_LINK;
 }
@@ -2906,9 +2932,8 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
                memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size);
                mi_get_ref(&ni->mi, &de->ref);
 
-               if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1)) {
+               if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1))
                        return false;
-               }
        }
 
        return true;
@@ -3077,7 +3102,9 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
                        const struct EA_INFO *info;
 
                        info = resident_data_ex(attr, sizeof(struct EA_INFO));
-                       dup->ea_size = info->size_pack;
+                       /* If ATTR_EA_INFO exists 'info' can't be NULL. */
+                       if (info)
+                               dup->ea_size = info->size_pack;
                }
        }
 
@@ -3205,7 +3232,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
                                        goto out;
                        }
 
-                       err = al_update(ni);
+                       err = al_update(ni, sync);
                        if (err)
                                goto out;
                }
index b5853aed0e25bc6d93b3f6f667ae36feda3d980b..06492f088d6020592bd4fdb8b1a6ecd22b823324 100644 (file)
@@ -6,12 +6,8 @@
  */
 
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
 #include <linux/random.h>
-#include <linux/ratelimit.h>
 #include <linux/slab.h>
 
 #include "debug.h"
@@ -2219,7 +2215,7 @@ file_is_valid:
 
                        err = ntfs_sb_write_run(log->ni->mi.sbi,
                                                &log->ni->file.run, off, page,
-                                               log->page_size);
+                                               log->page_size, 0);
 
                        if (err)
                                goto out;
@@ -3710,7 +3706,7 @@ move_data:
 
        if (a_dirty) {
                attr = oa->attr;
-               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
+               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
                if (err)
                        goto out;
        }
@@ -5152,10 +5148,10 @@ end_reply:
 
        ntfs_fix_pre_write(&rh->rhdr, log->page_size);
 
-       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
+       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
        if (!err)
                err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
-                                       rh, log->page_size);
+                                       rh, log->page_size, 0);
 
        kfree(rh);
        if (err)
index 91e3743e1442f10cfc727eba0587a9ef0b20c039..4de9acb1696898057fe3a096984ab3c6c1119810 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -358,7 +358,7 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
                             enum ALLOCATE_OPT opt)
 {
        int err;
-       CLST alen = 0;
+       CLST alen;
        struct super_block *sb = sbi->sb;
        size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
@@ -370,27 +370,28 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
                if (!zlen) {
                        err = ntfs_refresh_zone(sbi);
                        if (err)
-                               goto out;
+                               goto up_write;
+
                        zlen = wnd_zone_len(wnd);
                }
 
                if (!zlen) {
                        ntfs_err(sbi->sb, "no free space to extend mft");
-                       goto out;
+                       err = -ENOSPC;
+                       goto up_write;
                }
 
                lcn = wnd_zone_bit(wnd);
-               alen = zlen > len ? len : zlen;
+               alen = min_t(CLST, len, zlen);
 
                wnd_zone_set(wnd, lcn + alen, zlen - alen);
 
                err = wnd_set_used(wnd, lcn, alen);
-               if (err) {
-                       up_write(&wnd->rw_lock);
-                       return err;
-               }
+               if (err)
+                       goto up_write;
+
                alcn = lcn;
-               goto out;
+               goto space_found;
        }
        /*
         * 'Cause cluster 0 is always used this value means that we should use
@@ -404,49 +405,45 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
 
        alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
        if (alen)
-               goto out;
+               goto space_found;
 
        /* Try to use clusters from MftZone. */
        zlen = wnd_zone_len(wnd);
        zeroes = wnd_zeroes(wnd);
 
        /* Check too big request */
-       if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE)
-               goto out;
+       if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
+               err = -ENOSPC;
+               goto up_write;
+       }
 
        /* How many clusters to cat from zone. */
        zlcn = wnd_zone_bit(wnd);
        zlen2 = zlen >> 1;
-       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
-       new_zlen = zlen - ztrim;
-
-       if (new_zlen < NTFS_MIN_MFT_ZONE) {
-               new_zlen = NTFS_MIN_MFT_ZONE;
-               if (new_zlen > zlen)
-                       new_zlen = zlen;
-       }
+       ztrim = clamp_val(len, zlen2, zlen);
+       new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
 
        wnd_zone_set(wnd, zlcn, new_zlen);
 
        /* Allocate continues clusters. */
        alen = wnd_find(wnd, len, 0,
                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
-
-out:
-       if (alen) {
-               err = 0;
-               *new_len = alen;
-               *new_lcn = alcn;
-
-               ntfs_unmap_meta(sb, alcn, alen);
-
-               /* Set hint for next requests. */
-               if (!(opt & ALLOCATE_MFT))
-                       sbi->used.next_free_lcn = alcn + alen;
-       } else {
+       if (!alen) {
                err = -ENOSPC;
+               goto up_write;
        }
 
+space_found:
+       err = 0;
+       *new_len = alen;
+       *new_lcn = alcn;
+
+       ntfs_unmap_meta(sb, alcn, alen);
+
+       /* Set hint for next requests. */
+       if (!(opt & ALLOCATE_MFT))
+               sbi->used.next_free_lcn = alcn + alen;
+up_write:
        up_write(&wnd->rw_lock);
        return err;
 }
@@ -1080,7 +1077,7 @@ int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
 }
 
 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-                     u64 vbo, const void *buf, size_t bytes)
+                     u64 vbo, const void *buf, size_t bytes, int sync)
 {
        struct super_block *sb = sbi->sb;
        u8 cluster_bits = sbi->cluster_bits;
@@ -1099,8 +1096,8 @@ int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
        len = ((u64)clen << cluster_bits) - off;
 
        for (;;) {
-               u32 op = len < bytes ? len : bytes;
-               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
+               u32 op = min_t(u64, len, bytes);
+               int err = ntfs_sb_write(sb, lbo, op, buf, sync);
 
                if (err)
                        return err;
@@ -1300,7 +1297,7 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
        nb->off = off = lbo & (blocksize - 1);
 
        for (;;) {
-               u32 len32 = len < bytes ? len : bytes;
+               u32 len32 = min_t(u64, len, bytes);
                sector_t block = lbo >> sb->s_blocksize_bits;
 
                do {
@@ -2175,7 +2172,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
 
        /* Write main SDS bucket. */
        err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
-                               d_security, aligned_sec_size);
+                               d_security, aligned_sec_size, 0);
 
        if (err)
                goto out;
@@ -2193,7 +2190,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
 
        /* Write copy SDS bucket. */
        err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
-                               aligned_sec_size);
+                               aligned_sec_size, 0);
        if (err)
                goto out;
 
index 0daca9adc54c79084743efc2487b49a0ac0d3227..6f81e3a49abfb30f7aa570e26cf595b3e8599032 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -671,138 +671,74 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
                                  const struct INDEX_HDR *hdr, const void *key,
                                  size_t key_len, const void *ctx, int *diff)
 {
-       struct NTFS_DE *e;
+       struct NTFS_DE *e, *found = NULL;
        NTFS_CMP_FUNC cmp = indx->cmp;
+       int min_idx = 0, mid_idx, max_idx = 0;
+       int diff2;
+       int table_size = 8;
        u32 e_size, e_key_len;
        u32 end = le32_to_cpu(hdr->used);
        u32 off = le32_to_cpu(hdr->de_off);
+       u16 offs[128];
 
-#ifdef NTFS3_INDEX_BINARY_SEARCH
-       int max_idx = 0, fnd, min_idx;
-       int nslots = 64;
-       u16 *offs;
-
-       if (end > 0x10000)
-               goto next;
-
-       offs = kmalloc(sizeof(u16) * nslots, GFP_NOFS);
-       if (!offs)
-               goto next;
+fill_table:
+       if (off + sizeof(struct NTFS_DE) > end)
+               return NULL;
 
-       /* Use binary search algorithm. */
-next1:
-       if (off + sizeof(struct NTFS_DE) > end) {
-               e = NULL;
-               goto out1;
-       }
        e = Add2Ptr(hdr, off);
        e_size = le16_to_cpu(e->size);
 
-       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
-               e = NULL;
-               goto out1;
-       }
-
-       if (max_idx >= nslots) {
-               u16 *ptr;
-               int new_slots = ALIGN(2 * nslots, 8);
-
-               ptr = kmalloc(sizeof(u16) * new_slots, GFP_NOFS);
-               if (ptr)
-                       memcpy(ptr, offs, sizeof(u16) * max_idx);
-               kfree(offs);
-               offs = ptr;
-               nslots = new_slots;
-               if (!ptr)
-                       goto next;
-       }
-
-       /* Store entry table. */
-       offs[max_idx] = off;
+       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+               return NULL;
 
        if (!de_is_last(e)) {
+               offs[max_idx] = off;
                off += e_size;
-               max_idx += 1;
-               goto next1;
-       }
 
-       /*
-        * Table of pointers is created.
-        * Use binary search to find entry that is <= to the search value.
-        */
-       fnd = -1;
-       min_idx = 0;
+               max_idx++;
+               if (max_idx < table_size)
+                       goto fill_table;
 
-       while (min_idx <= max_idx) {
-               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
-               int diff2;
-
-               e = Add2Ptr(hdr, offs[mid_idx]);
+               max_idx--;
+       }
 
-               e_key_len = le16_to_cpu(e->key_size);
+binary_search:
+       e_key_len = le16_to_cpu(e->key_size);
 
-               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+       diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+       if (diff2 > 0) {
+               if (found) {
+                       min_idx = mid_idx + 1;
+               } else {
+                       if (de_is_last(e))
+                               return NULL;
 
-               if (!diff2) {
-                       *diff = 0;
-                       goto out1;
+                       max_idx = 0;
+                       table_size = min(table_size * 2,
+                                        (int)ARRAY_SIZE(offs));
+                       goto fill_table;
                }
-
-               if (diff2 < 0) {
+       } else if (diff2 < 0) {
+               if (found)
                        max_idx = mid_idx - 1;
-                       fnd = mid_idx;
-                       if (!fnd)
-                               break;
-               } else {
-                       min_idx = mid_idx + 1;
-               }
-       }
+               else
+                       max_idx--;
 
-       if (fnd == -1) {
-               e = NULL;
-               goto out1;
+               found = e;
+       } else {
+               *diff = 0;
+               return e;
        }
 
-       *diff = -1;
-       e = Add2Ptr(hdr, offs[fnd]);
-
-out1:
-       kfree(offs);
-
-       return e;
-#endif
-
-next:
-       /*
-        * Entries index are sorted.
-        * Enumerate all entries until we find entry
-        * that is <= to the search value.
-        */
-       if (off + sizeof(struct NTFS_DE) > end)
-               return NULL;
-
-       e = Add2Ptr(hdr, off);
-       e_size = le16_to_cpu(e->size);
-
-       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
-               return NULL;
-
-       off += e_size;
-
-       e_key_len = le16_to_cpu(e->key_size);
-
-       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
-       if (!*diff)
-               return e;
+       if (min_idx > max_idx) {
+               *diff = -1;
+               return found;
+       }
 
-       if (*diff <= 0)
-               return e;
+       mid_idx = (min_idx + max_idx) >> 1;
+       e = Add2Ptr(hdr, offs[mid_idx]);
 
-       if (de_is_last(e)) {
-               *diff = 1;
-               return e;
-       }
-       goto next;
+       goto binary_search;
 }
 
 /*
@@ -1136,9 +1072,7 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
        if (!e)
                return -EINVAL;
 
-       if (fnd)
-               fnd->root_de = e;
-
+       fnd->root_de = e;
        err = 0;
 
        for (;;) {
@@ -1401,7 +1335,7 @@ ok:
 static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
                                CLST *vbn)
 {
-       int err = -ENOMEM;
+       int err;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct ATTRIB *bitmap;
        struct ATTRIB *alloc;
index db2a5a4c38e4d62abfb2c7ca16947cd766982b13..859951d785cb2f4380c85b1d799f66f008da7d45 100644 (file)
@@ -5,10 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
 #include <linux/nls.h>
@@ -49,8 +47,8 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 
        inode->i_op = NULL;
        /* Setup 'uid' and 'gid' */
-       inode->i_uid = sbi->options.fs_uid;
-       inode->i_gid = sbi->options.fs_gid;
+       inode->i_uid = sbi->options->fs_uid;
+       inode->i_gid = sbi->options->fs_gid;
 
        err = mi_init(&ni->mi, sbi, ino);
        if (err)
@@ -224,12 +222,9 @@ next_attr:
                if (!attr->non_res) {
                        ni->i_valid = inode->i_size = rsize;
                        inode_set_bytes(inode, rsize);
-                       t32 = asize;
-               } else {
-                       t32 = le16_to_cpu(attr->nres.run_off);
                }
 
-               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
+               mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
 
                if (!attr->non_res) {
                        ni->ni_flags |= NI_FLAG_RESIDENT;
@@ -272,7 +267,7 @@ next_attr:
                        goto out;
 
                mode = sb->s_root
-                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
+                              ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
                               : (S_IFDIR | 0777);
                goto next_attr;
 
@@ -315,17 +310,14 @@ next_attr:
                rp_fa = ni_parse_reparse(ni, attr, &rp);
                switch (rp_fa) {
                case REPARSE_LINK:
-                       if (!attr->non_res) {
-                               inode->i_size = rsize;
-                               inode_set_bytes(inode, rsize);
-                               t32 = asize;
-                       } else {
-                               inode->i_size =
-                                       le64_to_cpu(attr->nres.data_size);
-                               t32 = le16_to_cpu(attr->nres.run_off);
-                       }
+                       /*
+                        * Normal symlink.
+                        * Assume one unicode symbol == one utf8.
+                        */
+                       inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
+                                                           .PrintNameLength) /
+                                       sizeof(u16);
 
-                       /* Looks like normal symlink. */
                        ni->i_valid = inode->i_size;
 
                        /* Clear directory bit. */
@@ -422,7 +414,7 @@ end_enum:
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
                inode->i_op = &ntfs_link_inode_operations;
                inode->i_fop = NULL;
-               inode_nohighmem(inode); // ??
+               inode_nohighmem(inode);
        } else if (S_ISREG(mode)) {
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
                inode->i_op = &ntfs_file_inode_operations;
@@ -443,7 +435,7 @@ end_enum:
                goto out;
        }
 
-       if ((sbi->options.sys_immutable &&
+       if ((sbi->options->sys_immutable &&
             (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
            !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
                inode->i_flags |= S_IMMUTABLE;
@@ -1200,9 +1192,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        struct REPARSE_DATA_BUFFER *rp = NULL;
        bool rp_inserted = false;
 
+       ni_lock_dir(dir_ni);
+
        dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
-       if (!dir_root)
-               return ERR_PTR(-EINVAL);
+       if (!dir_root) {
+               err = -EINVAL;
+               goto out1;
+       }
 
        if (S_ISDIR(mode)) {
                /* Use parent's directory attributes. */
@@ -1244,7 +1240,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                 *      }
                 */
        } else if (S_ISREG(mode)) {
-               if (sbi->options.sparse) {
+               if (sbi->options->sparse) {
                        /* Sparsed regular file, cause option 'sparse'. */
                        fa = FILE_ATTRIBUTE_SPARSE_FILE |
                             FILE_ATTRIBUTE_ARCHIVE;
@@ -1486,7 +1482,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
                t16 = PtrOffset(rec, attr);
 
-               /* 0x78 - the size of EA + EAINFO to store WSL */
+               /*
+                * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
+                * It is a good idea to keep extended attributes resident.
+                */
                if (asize + t16 + 0x78 + 8 > sbi->record_size) {
                        CLST alen;
                        CLST clst = bytes_to_cluster(sbi, nsize);
@@ -1521,14 +1520,14 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                        }
 
                        asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
-                       inode->i_size = nsize;
                } else {
                        attr->res.data_off = SIZEOF_RESIDENT_LE;
                        attr->res.data_size = cpu_to_le32(nsize);
                        memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
-                       inode->i_size = nsize;
                        nsize = 0;
                }
+               /* Size of symlink equals the length of input string. */
+               inode->i_size = size;
 
                attr->size = cpu_to_le32(asize);
 
@@ -1551,6 +1550,9 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        if (err)
                goto out6;
 
+       /* Unlock parent directory before ntfs_init_acl. */
+       ni_unlock(dir_ni);
+
        inode->i_generation = le16_to_cpu(rec->seq);
 
        dir->i_mtime = dir->i_ctime = inode->i_atime;
@@ -1562,6 +1564,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                inode->i_op = &ntfs_link_inode_operations;
                inode->i_fop = NULL;
                inode->i_mapping->a_ops = &ntfs_aops;
+               inode->i_size = size;
+               inode_nohighmem(inode);
        } else if (S_ISREG(mode)) {
                inode->i_op = &ntfs_file_inode_operations;
                inode->i_fop = &ntfs_file_operations;
@@ -1577,7 +1581,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
                err = ntfs_init_acl(mnt_userns, inode, dir);
                if (err)
-                       goto out6;
+                       goto out7;
        } else
 #endif
        {
@@ -1586,7 +1590,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
 
        /* Write non resident data. */
        if (nsize) {
-               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
+               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
                if (err)
                        goto out7;
        }
@@ -1607,8 +1611,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
 out7:
 
        /* Undo 'indx_insert_entry'. */
+       ni_lock_dir(dir_ni);
        indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
                          le16_to_cpu(new_de->key_size), sbi);
+       /* ni_unlock(dir_ni); will be called later. */
 out6:
        if (rp_inserted)
                ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
@@ -1632,8 +1638,10 @@ out2:
        kfree(rp);
 
 out1:
-       if (err)
+       if (err) {
+               ni_unlock(dir_ni);
                return ERR_PTR(err);
+       }
 
        unlock_new_inode(inode);
 
@@ -1754,15 +1762,15 @@ void ntfs_evict_inode(struct inode *inode)
 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
                                      int buflen)
 {
-       int i, err = 0;
+       int i, err = -EINVAL;
        struct ntfs_inode *ni = ntfs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       u64 i_size = inode->i_size;
-       u16 nlen = 0;
+       u64 size;
+       u16 ulen = 0;
        void *to_free = NULL;
        struct REPARSE_DATA_BUFFER *rp;
-       struct le_str *uni;
+       const __le16 *uname;
        struct ATTRIB *attr;
 
        /* Reparse data present. Try to parse it. */
@@ -1771,68 +1779,64 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
 
        *buffer = 0;
 
-       /* Read into temporal buffer. */
-       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
-               err = -EINVAL;
-               goto out;
-       }
-
        attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
-       if (!attr) {
-               err = -EINVAL;
+       if (!attr)
                goto out;
-       }
 
        if (!attr->non_res) {
-               rp = resident_data_ex(attr, i_size);
-               if (!rp) {
-                       err = -EINVAL;
+               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
+               if (!rp)
                        goto out;
-               }
+               size = le32_to_cpu(attr->res.data_size);
        } else {
-               rp = kmalloc(i_size, GFP_NOFS);
+               size = le64_to_cpu(attr->nres.data_size);
+               rp = NULL;
+       }
+
+       if (size > sbi->reparse.max_size || size <= sizeof(u32))
+               goto out;
+
+       if (!rp) {
+               rp = kmalloc(size, GFP_NOFS);
                if (!rp) {
                        err = -ENOMEM;
                        goto out;
                }
                to_free = rp;
-               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
+               /* Read into temporal buffer. */
+               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
                if (err)
                        goto out;
        }
 
-       err = -EINVAL;
-
        /* Microsoft Tag. */
        switch (rp->ReparseTag) {
        case IO_REPARSE_TAG_MOUNT_POINT:
                /* Mount points and junctions. */
                /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
-               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
-                                      MountPointReparseBuffer.PathBuffer))
+               if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+                                    MountPointReparseBuffer.PathBuffer))
                        goto out;
-               uni = Add2Ptr(rp,
-                             offsetof(struct REPARSE_DATA_BUFFER,
-                                      MountPointReparseBuffer.PathBuffer) +
-                                     le16_to_cpu(rp->MountPointReparseBuffer
-                                                         .PrintNameOffset) -
-                                     2);
-               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
+               uname = Add2Ptr(rp,
+                               offsetof(struct REPARSE_DATA_BUFFER,
+                                        MountPointReparseBuffer.PathBuffer) +
+                                       le16_to_cpu(rp->MountPointReparseBuffer
+                                                           .PrintNameOffset));
+               ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
                break;
 
        case IO_REPARSE_TAG_SYMLINK:
                /* FolderSymbolicLink */
                /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
-               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
-                                      SymbolicLinkReparseBuffer.PathBuffer))
+               if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+                                    SymbolicLinkReparseBuffer.PathBuffer))
                        goto out;
-               uni = Add2Ptr(rp,
-                             offsetof(struct REPARSE_DATA_BUFFER,
-                                      SymbolicLinkReparseBuffer.PathBuffer) +
-                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
-                                                         .PrintNameOffset) -
-                                     2);
-               nlen = le16_to_cpu(
+               uname = Add2Ptr(
+                       rp, offsetof(struct REPARSE_DATA_BUFFER,
+                                    SymbolicLinkReparseBuffer.PathBuffer) +
+                                   le16_to_cpu(rp->SymbolicLinkReparseBuffer
+                                                       .PrintNameOffset));
+               ulen = le16_to_cpu(
                        rp->SymbolicLinkReparseBuffer.PrintNameLength);
                break;
 
@@ -1864,29 +1868,28 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
                        goto out;
                }
                if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
-                   i_size <= sizeof(struct REPARSE_POINT)) {
+                   size <= sizeof(struct REPARSE_POINT)) {
                        goto out;
                }
 
                /* Users tag. */
-               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
-               nlen = le16_to_cpu(rp->ReparseDataLength) -
+               uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
+               ulen = le16_to_cpu(rp->ReparseDataLength) -
                       sizeof(struct REPARSE_POINT);
        }
 
        /* Convert nlen from bytes to UNICODE chars. */
-       nlen >>= 1;
+       ulen >>= 1;
 
        /* Check that name is available. */
-       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
+       if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
                goto out;
 
        /* If name is already zero terminated then truncate it now. */
-       if (!uni->name[nlen - 1])
-               nlen -= 1;
-       uni->len = nlen;
+       if (!uname[ulen - 1])
+               ulen -= 1;
 
-       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
+       err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
 
        if (err < 0)
                goto out;
index 2d70ae42f1b511f3b43af7a9b886ac1ffd21822b..dd7ced000d0e755ca1b337273dab7577c194a7c4 100644 (file)
@@ -5,6 +5,9 @@
  * Copyright (C) 2015 Eric Biggers
  */
 
+#ifndef _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+#define _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+
 #include <linux/string.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
@@ -336,3 +339,5 @@ static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend
 
        return dst;
 }
+
+#endif /* _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H */
index f508fbad2e712d946274b13f0ec7b244dc264a4d..90309a5ae59c7a98f3f0b61f5883fe390790eb50 100644 (file)
@@ -7,6 +7,10 @@
  * - linux kernel code style
  */
 
+#ifndef _LINUX_NTFS3_LIB_LIB_H
+#define _LINUX_NTFS3_LIB_LIB_H
+
+#include <linux/types.h>
 
 /* globals from xpress_decompress.c */
 struct xpress_decompressor *xpress_allocate_decompressor(void);
@@ -24,3 +28,5 @@ int lzx_decompress(struct lzx_decompressor *__restrict d,
                   const void *__restrict compressed_data,
                   size_t compressed_size, void *__restrict uncompressed_data,
                   size_t uncompressed_size);
+
+#endif /* _LINUX_NTFS3_LIB_LIB_H */
index f1f691a67cc490254957f062cfc38ec12ac7e834..28f654561f279a5d09ddd4a1352930b0fefb5416 100644 (file)
@@ -5,13 +5,13 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
 
 #include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 // clang-format off
@@ -292,7 +292,7 @@ next:
 /*
  * get_lznt_ctx
  * @level: 0 - Standard compression.
- *        !0 - Best compression, requires a lot of cpu.
+ *        !0 - Best compression, requires a lot of cpu.
  */
 struct lznt *get_lznt_ctx(int level)
 {
index e58415d0713280297df1b7bac30eb17642431fa0..bc741213ad84833b7fd434c32d4b784c6e7cb2f7 100644 (file)
@@ -5,11 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
-#include <linux/namei.h>
 #include <linux/nls.h>
 
 #include "debug.h"
@@ -99,16 +95,11 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
 static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
                       struct dentry *dentry, umode_t mode, bool excl)
 {
-       struct ntfs_inode *ni = ntfs_i(dir);
        struct inode *inode;
 
-       ni_lock_dir(ni);
-
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
                                  0, NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -120,16 +111,11 @@ static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
 static int ntfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
                      struct dentry *dentry, umode_t mode, dev_t rdev)
 {
-       struct ntfs_inode *ni = ntfs_i(dir);
        struct inode *inode;
 
-       ni_lock_dir(ni);
-
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, mode, rdev,
                                  NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -200,15 +186,10 @@ static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
 {
        u32 size = strlen(symname);
        struct inode *inode;
-       struct ntfs_inode *ni = ntfs_i(dir);
-
-       ni_lock_dir(ni);
 
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
                                  0, symname, size, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -219,15 +200,10 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
                      struct dentry *dentry, umode_t mode)
 {
        struct inode *inode;
-       struct ntfs_inode *ni = ntfs_i(dir);
-
-       ni_lock_dir(ni);
 
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
                                  0, NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
index 6bb3e595263b67c7cc66a0fecc5dfa68310826d4..9cc396b117bfd9b02531ad6c6de89ded53bba7f3 100644 (file)
 #ifndef _LINUX_NTFS3_NTFS_H
 #define _LINUX_NTFS3_NTFS_H
 
-/* TODO: Check 4K MFT record and 512 bytes cluster. */
+#include <linux/blkdev.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "debug.h"
 
-/* Activate this define to use binary search in indexes. */
-#define NTFS3_INDEX_BINARY_SEARCH
+/* TODO: Check 4K MFT record and 512 bytes cluster. */
 
 /* Check each run for marked clusters. */
 #define NTFS3_CHECK_FREE_CLST
 
 #define NTFS_NAME_LEN 255
 
-/* ntfs.sys used 500 maximum links on-disk struct allows up to 0xffff. */
-#define NTFS_LINK_MAX 0x400
-//#define NTFS_LINK_MAX 0xffff
+/*
+ * ntfs.sys used 500 maximum links on-disk struct allows up to 0xffff.
+ * xfstest generic/041 creates 3003 hardlinks.
+ */
+#define NTFS_LINK_MAX 4000
 
 /*
  * Activate to use 64 bit clusters instead of 32 bits in ntfs.sys.
index dc71c59fd44545118b6ab3023492aa8e4e75b017..8aaec7e0804efaa4b66727bf0d7360f68abba8c0 100644 (file)
@@ -9,6 +9,37 @@
 #ifndef _LINUX_NTFS3_NTFS_FS_H
 #define _LINUX_NTFS3_NTFS_FS_H
 
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/cleancache.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <asm/div64.h>
+#include <asm/page.h>
+
+#include "debug.h"
+#include "ntfs.h"
+
+struct dentry;
+struct fiemap_extent_info;
+struct user_namespace;
+struct page;
+struct writeback_control;
+enum utf16_endian;
+
+
 #define MINUS_ONE_T                    ((size_t)(-1))
 /* Biggest MFT / smallest cluster */
 #define MAXIMUM_BYTES_PER_MFT          4096
@@ -52,6 +83,7 @@
 // clang-format on
 
 struct ntfs_mount_options {
+       char *nls_name;
        struct nls_table *nls;
 
        kuid_t fs_uid;
@@ -59,19 +91,16 @@ struct ntfs_mount_options {
        u16 fs_fmask_inv;
        u16 fs_dmask_inv;
 
-       unsigned uid : 1, /* uid was set. */
-               gid : 1, /* gid was set. */
-               fmask : 1, /* fmask was set. */
-               dmask : 1, /* dmask was set. */
-               sys_immutable : 1, /* Immutable system files. */
-               discard : 1, /* Issue discard requests on deletions. */
-               sparse : 1, /* Create sparse files. */
-               showmeta : 1, /* Show meta files. */
-               nohidden : 1, /* Do not show hidden files. */
-               force : 1, /* Rw mount dirty volume. */
-               no_acs_rules : 1, /*Exclude acs rules. */
-               prealloc : 1 /* Preallocate space when file is growing. */
-               ;
+       unsigned fmask : 1; /* fmask was set. */
+       unsigned dmask : 1; /* dmask was set. */
+       unsigned sys_immutable : 1; /* Immutable system files. */
+       unsigned discard : 1; /* Issue discard requests on deletions. */
+       unsigned sparse : 1; /* Create sparse files. */
+       unsigned showmeta : 1; /* Show meta files. */
+       unsigned nohidden : 1; /* Do not show hidden files. */
+       unsigned force : 1; /* RW mount dirty volume. */
+       unsigned noacsrules : 1; /* Exclude acs rules. */
+       unsigned prealloc : 1; /* Preallocate space when file is growing. */
 };
 
 /* Special value to unpack and deallocate. */
@@ -182,10 +211,8 @@ struct ntfs_sb_info {
        u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
 
        u32 record_size;
-       u32 sector_size;
        u32 index_size;
 
-       u8 sector_bits;
        u8 cluster_bits;
        u8 record_bits;
 
@@ -279,7 +306,7 @@ struct ntfs_sb_info {
 #endif
        } compress;
 
-       struct ntfs_mount_options options;
+       struct ntfs_mount_options *options;
        struct ratelimit_state msg_ratelimit;
 };
 
@@ -436,7 +463,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
 bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
                  const __le16 *name, size_t name_len,
                  const struct MFT_REF *ref);
-int al_update(struct ntfs_inode *ni);
+int al_update(struct ntfs_inode *ni, int sync);
 static inline size_t al_aligned(size_t size)
 {
        return (size + 1023) & ~(size_t)1023;
@@ -448,7 +475,7 @@ bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
 size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
 
 /* Globals from dir.c */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
                      u8 *buf, int buf_len);
 int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
                      struct cpu_str *uni, u32 max_ulen,
@@ -520,7 +547,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
                                     struct ATTR_LIST_ENTRY **entry);
 int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
 enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
-                                  void *buffer);
+                                  struct REPARSE_DATA_BUFFER *buffer);
 int ni_write_inode(struct inode *inode, int sync, const char *hint);
 #define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
 int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
@@ -577,7 +604,7 @@ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
                  const void *buffer, int wait);
 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-                     u64 vbo, const void *buf, size_t bytes);
+                     u64 vbo, const void *buf, size_t bytes, int sync);
 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
                                   const struct runs_tree *run, u64 vbo);
 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
index 103705c86772f45a90227e6434449e99c4d7d94b..861e35791506e801dc446414d935d5463e64a1fb 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
index 26ed2b64345e665a39c6b37d88b29c5b81592ecd..a8fec651f9732878ad871cc476d8a6fcf28f9312 100644 (file)
@@ -7,10 +7,8 @@
  */
 
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
 #include <linux/log2.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
index 55bbc9200a10ebdf3011e0a517eb44afd8a35f3f..d41d76979e121fd9e836ef0072b122ab6de3735b 100644 (file)
  *
  */
 
-#include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/exportfs.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/nls.h>
-#include <linux/parser.h>
 #include <linux/seq_file.h>
 #include <linux/statfs.h>
 
@@ -205,9 +204,11 @@ void *ntfs_put_shared(void *ptr)
        return ret;
 }
 
-static inline void clear_mount_options(struct ntfs_mount_options *options)
+static inline void put_mount_options(struct ntfs_mount_options *options)
 {
+       kfree(options->nls_name);
        unload_nls(options->nls);
+       kfree(options);
 }
 
 enum Opt {
@@ -223,218 +224,175 @@ enum Opt {
        Opt_nohidden,
        Opt_showmeta,
        Opt_acl,
-       Opt_noatime,
-       Opt_nls,
+       Opt_iocharset,
        Opt_prealloc,
-       Opt_no_acs_rules,
+       Opt_noacsrules,
        Opt_err,
 };
 
-static const match_table_t ntfs_tokens = {
-       { Opt_uid, "uid=%u" },
-       { Opt_gid, "gid=%u" },
-       { Opt_umask, "umask=%o" },
-       { Opt_dmask, "dmask=%o" },
-       { Opt_fmask, "fmask=%o" },
-       { Opt_immutable, "sys_immutable" },
-       { Opt_discard, "discard" },
-       { Opt_force, "force" },
-       { Opt_sparse, "sparse" },
-       { Opt_nohidden, "nohidden" },
-       { Opt_acl, "acl" },
-       { Opt_noatime, "noatime" },
-       { Opt_showmeta, "showmeta" },
-       { Opt_nls, "nls=%s" },
-       { Opt_prealloc, "prealloc" },
-       { Opt_no_acs_rules, "no_acs_rules" },
-       { Opt_err, NULL },
+static const struct fs_parameter_spec ntfs_fs_parameters[] = {
+       fsparam_u32("uid",                      Opt_uid),
+       fsparam_u32("gid",                      Opt_gid),
+       fsparam_u32oct("umask",                 Opt_umask),
+       fsparam_u32oct("dmask",                 Opt_dmask),
+       fsparam_u32oct("fmask",                 Opt_fmask),
+       fsparam_flag_no("sys_immutable",        Opt_immutable),
+       fsparam_flag_no("discard",              Opt_discard),
+       fsparam_flag_no("force",                Opt_force),
+       fsparam_flag_no("sparse",               Opt_sparse),
+       fsparam_flag_no("hidden",               Opt_nohidden),
+       fsparam_flag_no("acl",                  Opt_acl),
+       fsparam_flag_no("showmeta",             Opt_showmeta),
+       fsparam_flag_no("prealloc",             Opt_prealloc),
+       fsparam_flag_no("acsrules",             Opt_noacsrules),
+       fsparam_string("iocharset",             Opt_iocharset),
+       {}
 };
 
-static noinline int ntfs_parse_options(struct super_block *sb, char *options,
-                                      int silent,
-                                      struct ntfs_mount_options *opts)
+/*
+ * Load nls table or if @nls is utf8 then return NULL.
+ */
+static struct nls_table *ntfs_load_nls(char *nls)
 {
-       char *p;
-       substring_t args[MAX_OPT_ARGS];
-       int option;
-       char nls_name[30];
-       struct nls_table *nls;
+       struct nls_table *ret;
 
-       opts->fs_uid = current_uid();
-       opts->fs_gid = current_gid();
-       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
-       nls_name[0] = 0;
+       if (!nls)
+               nls = CONFIG_NLS_DEFAULT;
 
-       if (!options)
-               goto out;
+       if (strcmp(nls, "utf8") == 0)
+               return NULL;
 
-       while ((p = strsep(&options, ","))) {
-               int token;
+       if (strcmp(nls, CONFIG_NLS_DEFAULT) == 0)
+               return load_nls_default();
 
-               if (!*p)
-                       continue;
+       ret = load_nls(nls);
+       if (ret)
+               return ret;
 
-               token = match_token(p, ntfs_tokens, args);
-               switch (token) {
-               case Opt_immutable:
-                       opts->sys_immutable = 1;
-                       break;
-               case Opt_uid:
-                       if (match_int(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_uid = make_kuid(current_user_ns(), option);
-                       if (!uid_valid(opts->fs_uid))
-                               return -EINVAL;
-                       opts->uid = 1;
-                       break;
-               case Opt_gid:
-                       if (match_int(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_gid = make_kgid(current_user_ns(), option);
-                       if (!gid_valid(opts->fs_gid))
-                               return -EINVAL;
-                       opts->gid = 1;
-                       break;
-               case Opt_umask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
-                       opts->fmask = opts->dmask = 1;
-                       break;
-               case Opt_dmask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_dmask_inv = ~option;
-                       opts->dmask = 1;
-                       break;
-               case Opt_fmask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_fmask_inv = ~option;
-                       opts->fmask = 1;
-                       break;
-               case Opt_discard:
-                       opts->discard = 1;
-                       break;
-               case Opt_force:
-                       opts->force = 1;
-                       break;
-               case Opt_sparse:
-                       opts->sparse = 1;
-                       break;
-               case Opt_nohidden:
-                       opts->nohidden = 1;
-                       break;
-               case Opt_acl:
+       return ERR_PTR(-EINVAL);
+}
+
+static int ntfs_fs_parse_param(struct fs_context *fc,
+                              struct fs_parameter *param)
+{
+       struct ntfs_mount_options *opts = fc->fs_private;
+       struct fs_parse_result result;
+       int opt;
+
+       opt = fs_parse(fc, ntfs_fs_parameters, param, &result);
+       if (opt < 0)
+               return opt;
+
+       switch (opt) {
+       case Opt_uid:
+               opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+               if (!uid_valid(opts->fs_uid))
+                       return invalf(fc, "ntfs3: Invalid value for uid.");
+               break;
+       case Opt_gid:
+               opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+               if (!gid_valid(opts->fs_gid))
+                       return invalf(fc, "ntfs3: Invalid value for gid.");
+               break;
+       case Opt_umask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for umask.");
+               opts->fs_fmask_inv = ~result.uint_32;
+               opts->fs_dmask_inv = ~result.uint_32;
+               opts->fmask = 1;
+               opts->dmask = 1;
+               break;
+       case Opt_dmask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for dmask.");
+               opts->fs_dmask_inv = ~result.uint_32;
+               opts->dmask = 1;
+               break;
+       case Opt_fmask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for fmask.");
+               opts->fs_fmask_inv = ~result.uint_32;
+               opts->fmask = 1;
+               break;
+       case Opt_immutable:
+               opts->sys_immutable = result.negated ? 0 : 1;
+               break;
+       case Opt_discard:
+               opts->discard = result.negated ? 0 : 1;
+               break;
+       case Opt_force:
+               opts->force = result.negated ? 0 : 1;
+               break;
+       case Opt_sparse:
+               opts->sparse = result.negated ? 0 : 1;
+               break;
+       case Opt_nohidden:
+               opts->nohidden = result.negated ? 1 : 0;
+               break;
+       case Opt_acl:
+               if (!result.negated)
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
-                       sb->s_flags |= SB_POSIXACL;
-                       break;
+                       fc->sb_flags |= SB_POSIXACL;
 #else
-                       ntfs_err(sb, "support for ACL not compiled in!");
-                       return -EINVAL;
+                       return invalf(fc, "ntfs3: Support for ACL not compiled in!");
 #endif
-               case Opt_noatime:
-                       sb->s_flags |= SB_NOATIME;
-                       break;
-               case Opt_showmeta:
-                       opts->showmeta = 1;
-                       break;
-               case Opt_nls:
-                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
-                       break;
-               case Opt_prealloc:
-                       opts->prealloc = 1;
-                       break;
-               case Opt_no_acs_rules:
-                       opts->no_acs_rules = 1;
-                       break;
-               default:
-                       if (!silent)
-                               ntfs_err(
-                                       sb,
-                                       "Unrecognized mount option \"%s\" or missing value",
-                                       p);
-                       //return -EINVAL;
-               }
-       }
-
-out:
-       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
-               /*
-                * For UTF-8 use utf16s_to_utf8s()/utf8s_to_utf16s()
-                * instead of NLS.
-                */
-               nls = NULL;
-       } else if (nls_name[0]) {
-               nls = load_nls(nls_name);
-               if (!nls) {
-                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
-                       return -EINVAL;
-               }
-       } else {
-               nls = load_nls_default();
-               if (!nls) {
-                       ntfs_err(sb, "failed to load default nls");
-                       return -EINVAL;
-               }
+               else
+                       fc->sb_flags &= ~SB_POSIXACL;
+               break;
+       case Opt_showmeta:
+               opts->showmeta = result.negated ? 0 : 1;
+               break;
+       case Opt_iocharset:
+               kfree(opts->nls_name);
+               opts->nls_name = param->string;
+               param->string = NULL;
+               break;
+       case Opt_prealloc:
+               opts->prealloc = result.negated ? 0 : 1;
+               break;
+       case Opt_noacsrules:
+               opts->noacsrules = result.negated ? 1 : 0;
+               break;
+       default:
+               /* Should not be here unless we forget to add a case. */
+               return -EINVAL;
        }
-       opts->nls = nls;
-
        return 0;
 }
 
-static int ntfs_remount(struct super_block *sb, int *flags, char *data)
+static int ntfs_fs_reconfigure(struct fs_context *fc)
 {
-       int err, ro_rw;
+       struct super_block *sb = fc->root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       struct ntfs_mount_options old_opts;
-       char *orig_data = kstrdup(data, GFP_KERNEL);
-
-       if (data && !orig_data)
-               return -ENOMEM;
+       struct ntfs_mount_options *new_opts = fc->fs_private;
+       int ro_rw;
 
-       /* Store  original options. */
-       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
-       clear_mount_options(&sbi->options);
-       memset(&sbi->options, 0, sizeof(sbi->options));
-
-       err = ntfs_parse_options(sb, data, 0, &sbi->options);
-       if (err)
-               goto restore_opts;
-
-       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
+       ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
        if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
-               ntfs_warn(
-                       sb,
-                       "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
-               err = -EINVAL;
-               goto restore_opts;
+               errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
+               return -EINVAL;
        }
 
+       new_opts->nls = ntfs_load_nls(new_opts->nls_name);
+       if (IS_ERR(new_opts->nls)) {
+               new_opts->nls = NULL;
+               errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name);
+               return -EINVAL;
+       }
+       if (new_opts->nls != sbi->options->nls)
+               return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!");
+
        sync_filesystem(sb);
 
        if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
-           !sbi->options.force) {
-               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
-               err = -EINVAL;
-               goto restore_opts;
+           !new_opts->force) {
+               errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!");
+               return -EINVAL;
        }
 
-       clear_mount_options(&old_opts);
-
-       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
-                SB_NODIRATIME | SB_NOATIME;
-       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
-       err = 0;
-       goto out;
+       memcpy(sbi->options, new_opts, sizeof(*new_opts));
 
-restore_opts:
-       clear_mount_options(&sbi->options);
-       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
-
-out:
-       kfree(orig_data);
-       return err;
+       return 0;
 }
 
 static struct kmem_cache *ntfs_inode_cachep;
@@ -513,8 +471,6 @@ static noinline void put_ntfs(struct ntfs_sb_info *sbi)
        xpress_free_decompressor(sbi->compress.xpress);
        lzx_free_decompressor(sbi->compress.lzx);
 #endif
-       clear_mount_options(&sbi->options);
-
        kfree(sbi);
 }
 
@@ -525,7 +481,9 @@ static void ntfs_put_super(struct super_block *sb)
        /* Mark rw ntfs as clear, if possible. */
        ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
 
+       put_mount_options(sbi->options);
        put_ntfs(sbi);
+       sb->s_fs_info = NULL;
 
        sync_blockdev(sb->s_bdev);
 }
@@ -552,23 +510,21 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
 {
        struct super_block *sb = root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       struct ntfs_mount_options *opts = &sbi->options;
+       struct ntfs_mount_options *opts = sbi->options;
        struct user_namespace *user_ns = seq_user_ns(m);
 
-       if (opts->uid)
-               seq_printf(m, ",uid=%u",
-                          from_kuid_munged(user_ns, opts->fs_uid));
-       if (opts->gid)
-               seq_printf(m, ",gid=%u",
-                          from_kgid_munged(user_ns, opts->fs_gid));
+       seq_printf(m, ",uid=%u",
+                 from_kuid_munged(user_ns, opts->fs_uid));
+       seq_printf(m, ",gid=%u",
+                 from_kgid_munged(user_ns, opts->fs_gid));
        if (opts->fmask)
                seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
        if (opts->dmask)
                seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
        if (opts->nls)
-               seq_printf(m, ",nls=%s", opts->nls->charset);
+               seq_printf(m, ",iocharset=%s", opts->nls->charset);
        else
-               seq_puts(m, ",nls=utf8");
+               seq_puts(m, ",iocharset=utf8");
        if (opts->sys_immutable)
                seq_puts(m, ",sys_immutable");
        if (opts->discard)
@@ -581,14 +537,12 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nohidden");
        if (opts->force)
                seq_puts(m, ",force");
-       if (opts->no_acs_rules)
-               seq_puts(m, ",no_acs_rules");
+       if (opts->noacsrules)
+               seq_puts(m, ",noacsrules");
        if (opts->prealloc)
                seq_puts(m, ",prealloc");
        if (sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
-       if (sb->s_flags & SB_NOATIME)
-               seq_puts(m, ",noatime");
 
        return 0;
 }
@@ -643,7 +597,6 @@ static const struct super_operations ntfs_sops = {
        .statfs = ntfs_statfs,
        .show_options = ntfs_show_options,
        .sync_fs = ntfs_sync_fs,
-       .remount_fs = ntfs_remount,
        .write_inode = ntfs3_write_inode,
 };
 
@@ -729,7 +682,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        int err;
        u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
-       u64 sectors, clusters, fs_size, mlcn, mlcn2;
+       u64 sectors, clusters, mlcn, mlcn2;
        struct NTFS_BOOT *boot;
        struct buffer_head *bh;
        struct MFT_REC *rec;
@@ -787,20 +740,20 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
                goto out;
        }
 
-       sbi->sector_size = boot_sector_size;
-       sbi->sector_bits = blksize_bits(boot_sector_size);
-       fs_size = (sectors + 1) << sbi->sector_bits;
+       sbi->volume.size = sectors * boot_sector_size;
 
-       gb = format_size_gb(fs_size, &mb);
+       gb = format_size_gb(sbi->volume.size + boot_sector_size, &mb);
 
        /*
         * - Volume formatted and mounted with the same sector size.
         * - Volume formatted 4K and mounted as 512.
         * - Volume formatted 512 and mounted as 4K.
         */
-       if (sbi->sector_size != sector_size) {
-               ntfs_warn(sb,
-                         "Different NTFS' sector size and media sector size");
+       if (boot_sector_size != sector_size) {
+               ntfs_warn(
+                       sb,
+                       "Different NTFS' sector size (%u) and media sector size (%u)",
+                       boot_sector_size, sector_size);
                dev_size += sector_size - 1;
        }
 
@@ -810,8 +763,19 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        sbi->mft.lbo = mlcn << sbi->cluster_bits;
        sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
 
-       if (sbi->cluster_size < sbi->sector_size)
+       /* Compare boot's cluster and sector. */
+       if (sbi->cluster_size < boot_sector_size)
+               goto out;
+
+       /* Compare boot's cluster and media sector. */
+       if (sbi->cluster_size < sector_size) {
+               /* No way to use ntfs_get_block in this case. */
+               ntfs_err(
+                       sb,
+                       "Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u)",
+                       sbi->cluster_size, sector_size);
                goto out;
+       }
 
        sbi->cluster_mask = sbi->cluster_size - 1;
        sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
@@ -836,10 +800,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
                                  : (u32)boot->index_size << sbi->cluster_bits;
 
        sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
-       sbi->volume.size = sectors << sbi->sector_bits;
 
        /* Warning if RAW volume. */
-       if (dev_size < fs_size) {
+       if (dev_size < sbi->volume.size + boot_sector_size) {
                u32 mb0, gb0;
 
                gb0 = format_size_gb(dev_size, &mb0);
@@ -883,8 +846,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        rec->total = cpu_to_le32(sbi->record_size);
        ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
 
-       if (sbi->cluster_size < PAGE_SIZE)
-               sb_set_blocksize(sb, sbi->cluster_size);
+       sb_set_blocksize(sb, min_t(u32, sbi->cluster_size, PAGE_SIZE));
 
        sbi->block_mask = sb->s_blocksize - 1;
        sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
@@ -897,9 +859,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        if (clusters >= (1ull << (64 - sbi->cluster_bits)))
                sbi->maxbytes = -1;
        sbi->maxbytes_sparse = -1;
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
 #else
        /* Maximum size for sparse file. */
        sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
+       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
 #endif
 
        err = 0;
@@ -913,14 +877,13 @@ out:
 /*
  * ntfs_fill_super - Try to mount.
  */
-static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
+static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
 {
        int err;
-       struct ntfs_sb_info *sbi;
+       struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct block_device *bdev = sb->s_bdev;
-       struct inode *bd_inode = bdev->bd_inode;
-       struct request_queue *rq = bdev_get_queue(bdev);
-       struct inode *inode = NULL;
+       struct request_queue *rq;
+       struct inode *inode;
        struct ntfs_inode *ni;
        size_t i, tt;
        CLST vcn, lcn, len;
@@ -928,18 +891,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        const struct VOLUME_INFO *info;
        u32 idx, done, bytes;
        struct ATTR_DEF_ENTRY *t;
-       u16 *upcase = NULL;
        u16 *shared;
-       bool is_ro;
        struct MFT_REF ref;
 
        ref.high = 0;
 
-       sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
-       if (!sbi)
-               return -ENOMEM;
-
-       sb->s_fs_info = sbi;
        sbi->sb = sb;
        sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = 0x7366746e; // "ntfs"
@@ -948,41 +904,27 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
        sb->s_xattr = ntfs_xattr_handlers;
 
-       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
-                            DEFAULT_RATELIMIT_BURST);
-
-       err = ntfs_parse_options(sb, data, silent, &sbi->options);
-       if (err)
+       sbi->options->nls = ntfs_load_nls(sbi->options->nls_name);
+       if (IS_ERR(sbi->options->nls)) {
+               sbi->options->nls = NULL;
+               errorf(fc, "Cannot load nls %s", sbi->options->nls_name);
+               err = -EINVAL;
                goto out;
+       }
 
-       if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
-               ;
-       } else {
+       rq = bdev_get_queue(bdev);
+       if (blk_queue_discard(rq) && rq->limits.discard_granularity) {
                sbi->discard_granularity = rq->limits.discard_granularity;
                sbi->discard_granularity_mask_inv =
                        ~(u64)(sbi->discard_granularity - 1);
        }
 
-       sb_set_blocksize(sb, PAGE_SIZE);
-
        /* Parse boot. */
        err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
-                                 bd_inode->i_size);
+                                 bdev->bd_inode->i_size);
        if (err)
                goto out;
 
-#ifdef CONFIG_NTFS3_64BIT_CLUSTER
-       sb->s_maxbytes = MAX_LFS_FILESIZE;
-#else
-       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
-#endif
-
-       mutex_init(&sbi->compress.mtx_lznt);
-#ifdef CONFIG_NTFS3_LZX_XPRESS
-       mutex_init(&sbi->compress.mtx_xpress);
-       mutex_init(&sbi->compress.mtx_lzx);
-#endif
-
        /*
         * Load $Volume. This should be done before $LogFile
         * 'cause 'sbi->volume.ni' is used 'ntfs_set_state'.
@@ -991,9 +933,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_VOL);
        inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $Volume.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1015,36 +956,33 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        } else {
                /* Should we break mounting here? */
                //err = -EINVAL;
-               //goto out;
+               //goto put_inode_out;
        }
 
        attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
        if (!attr || is_attr_ext(attr)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
        if (!info) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        sbi->volume.major_ver = info->major_ver;
        sbi->volume.minor_ver = info->minor_ver;
        sbi->volume.flags = info->flags;
-
        sbi->volume.ni = ni;
-       inode = NULL;
 
        /* Load $MFTMirr to estimate recs_mirr. */
        ref.low = cpu_to_le32(MFT_REC_MIRR);
        ref.seq = cpu_to_le16(MFT_REC_MIRR);
        inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $MFTMirr.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1058,9 +996,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_LOG);
        inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load \x24LogFile.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1068,22 +1005,19 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        err = ntfs_loadlog_and_replay(ni, sbi);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        iput(inode);
-       inode = NULL;
-
-       is_ro = sb_rdonly(sbi->sb);
 
        if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
-               if (!is_ro) {
+               if (!sb_rdonly(sb)) {
                        ntfs_warn(sb,
                                  "failed to replay log file. Can't mount rw!");
                        err = -EINVAL;
                        goto out;
                }
        } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
-               if (!is_ro && !sbi->options.force) {
+               if (!sb_rdonly(sb) && !sbi->options->force) {
                        ntfs_warn(
                                sb,
                                "volume is dirty and \"force\" flag is not set!");
@@ -1098,9 +1032,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        inode = ntfs_iget5(sb, &ref, &NAME_MFT);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $MFT.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1112,11 +1045,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        err = wnd_init(&sbi->mft.bitmap, sb, tt);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        err = ni_load_all_mi(ni);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        sbi->mft.ni = ni;
 
@@ -1125,9 +1058,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
        inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $BadClus.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1150,18 +1082,15 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_BITMAP);
        inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $Bitmap.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
        if (inode->i_size >> 32) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 #endif
 
@@ -1169,14 +1098,14 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        tt = sbi->used.bitmap.nbits;
        if (inode->i_size < bitmap_size(tt)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        /* Not necessary. */
        sbi->used.bitmap.set_tail = true;
-       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
+       err = wnd_init(&sbi->used.bitmap, sb, tt);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        iput(inode);
 
@@ -1188,23 +1117,22 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        /* Load $AttrDef. */
        ref.low = cpu_to_le32(MFT_REC_ATTR);
        ref.seq = cpu_to_le16(MFT_REC_ATTR);
-       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
+       inode = ntfs_iget5(sb, &ref, &NAME_ATTRDEF);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
        if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
        bytes = inode->i_size;
        sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
        if (!t) {
                err = -ENOMEM;
-               goto out;
+               goto put_inode_out;
        }
 
        for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
@@ -1213,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
-                       goto out;
+                       goto put_inode_out;
                }
                memcpy(Add2Ptr(t, done), page_address(page),
                       min(PAGE_SIZE, tail));
@@ -1221,7 +1149,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
                if (!idx && ATTR_STD != t->type) {
                        err = -EINVAL;
-                       goto out;
+                       goto put_inode_out;
                }
        }
 
@@ -1254,33 +1182,24 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_UPCASE);
        inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
        if (IS_ERR(inode)) {
+               ntfs_err(sb, "Failed to load $UpCase.");
                err = PTR_ERR(inode);
-               ntfs_err(sb, "Failed to load \x24LogFile.");
-               inode = NULL;
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
        if (inode->i_size != 0x10000 * sizeof(short)) {
                err = -EINVAL;
-               goto out;
-       }
-
-       sbi->upcase = upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
-       if (!upcase) {
-               err = -ENOMEM;
-               goto out;
+               goto put_inode_out;
        }
 
        for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
                const __le16 *src;
-               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
+               u16 *dst = Add2Ptr(sbi->upcase, idx << PAGE_SHIFT);
                struct page *page = ntfs_map_page(inode->i_mapping, idx);
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
-                       goto out;
+                       goto put_inode_out;
                }
 
                src = page_address(page);
@@ -1294,14 +1213,13 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
                ntfs_unmap_page(page);
        }
 
-       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
-       if (shared && upcase != shared) {
+       shared = ntfs_set_shared(sbi->upcase, 0x10000 * sizeof(short));
+       if (shared && sbi->upcase != shared) {
+               kvfree(sbi->upcase);
                sbi->upcase = shared;
-               kvfree(upcase);
        }
 
        iput(inode);
-       inode = NULL;
 
        if (is_ntfs3(sbi)) {
                /* Load $Secure. */
@@ -1331,34 +1249,31 @@ load_root:
        ref.seq = cpu_to_le16(MFT_REC_ROOT);
        inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load root.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
        sb->s_root = d_make_root(inode);
-
        if (!sb->s_root) {
-               err = -EINVAL;
-               goto out;
+               err = -ENOMEM;
+               goto put_inode_out;
        }
 
+       fc->fs_private = NULL;
+
        return 0;
 
-out:
+put_inode_out:
        iput(inode);
-
-       if (sb->s_root) {
-               d_drop(sb->s_root);
-               sb->s_root = NULL;
-       }
-
+out:
+       /*
+        * Free resources here.
+        * ntfs_fs_free will be called with fc->s_fs_info = NULL
+        */
        put_ntfs(sbi);
-
        sb->s_fs_info = NULL;
+
        return err;
 }
 
@@ -1403,7 +1318,7 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
        if (sbi->flags & NTFS_FLAGS_NODISCARD)
                return -EOPNOTSUPP;
 
-       if (!sbi->options.discard)
+       if (!sbi->options->discard)
                return -EOPNOTSUPP;
 
        lbo = (u64)lcn << sbi->cluster_bits;
@@ -1428,19 +1343,99 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
        return err;
 }
 
-static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
-                                const char *dev_name, void *data)
+static int ntfs_fs_get_tree(struct fs_context *fc)
+{
+       return get_tree_bdev(fc, ntfs_fill_super);
+}
+
+/*
+ * ntfs_fs_free - Free fs_context.
+ *
+ * Note that this will be called after fill_super and reconfigure
+ * even when they succeed, so they must take over any pointers they keep.
+ */
+static void ntfs_fs_free(struct fs_context *fc)
 {
-       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+       struct ntfs_mount_options *opts = fc->fs_private;
+       struct ntfs_sb_info *sbi = fc->s_fs_info;
+
+       if (sbi)
+               put_ntfs(sbi);
+
+       if (opts)
+               put_mount_options(opts);
+}
+
+static const struct fs_context_operations ntfs_context_ops = {
+       .parse_param    = ntfs_fs_parse_param,
+       .get_tree       = ntfs_fs_get_tree,
+       .reconfigure    = ntfs_fs_reconfigure,
+       .free           = ntfs_fs_free,
+};
+
+/*
+ * ntfs_init_fs_context - Initialize sbi and opts
+ *
+ * This will be called when mounting/remounting. We initialize
+ * the options first so that a remount can reuse just them.
+ */
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+       struct ntfs_mount_options *opts;
+       struct ntfs_sb_info *sbi;
+
+       opts = kzalloc(sizeof(struct ntfs_mount_options), GFP_NOFS);
+       if (!opts)
+               return -ENOMEM;
+
+       /* Default options. */
+       opts->fs_uid = current_uid();
+       opts->fs_gid = current_gid();
+       opts->fs_fmask_inv = ~current_umask();
+       opts->fs_dmask_inv = ~current_umask();
+
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+               goto ok;
+
+       sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
+       if (!sbi)
+               goto free_opts;
+
+       sbi->upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
+       if (!sbi->upcase)
+               goto free_sbi;
+
+       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+                            DEFAULT_RATELIMIT_BURST);
+
+       mutex_init(&sbi->compress.mtx_lznt);
+#ifdef CONFIG_NTFS3_LZX_XPRESS
+       mutex_init(&sbi->compress.mtx_xpress);
+       mutex_init(&sbi->compress.mtx_lzx);
+#endif
+
+       sbi->options = opts;
+       fc->s_fs_info = sbi;
+ok:
+       fc->fs_private = opts;
+       fc->ops = &ntfs_context_ops;
+
+       return 0;
+free_sbi:
+       kfree(sbi);
+free_opts:
+       kfree(opts);
+       return -ENOMEM;
 }
 
 // clang-format off
 static struct file_system_type ntfs_fs_type = {
-       .owner          = THIS_MODULE,
-       .name           = "ntfs3",
-       .mount          = ntfs_mount,
-       .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+       .owner                  = THIS_MODULE,
+       .name                   = "ntfs3",
+       .init_fs_context        = ntfs_init_fs_context,
+       .parameters             = ntfs_fs_parameters,
+       .kill_sb                = kill_block_super,
+       .fs_flags               = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
 };
 // clang-format on
 
index bbeba778237eefc318b00e04b05ad0ac1592e1ec..b5e8256fd710d5263b2be07b92df14cb8eea3c35 100644 (file)
@@ -5,13 +5,9 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/module.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
 
-#include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
index 7282d85c4ece447c42118b81ae82d9924003a0ce..afd0ddad826ff49f661ce758bcbecec46921fd1c 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/xattr.h>
@@ -78,6 +75,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
                        size_t add_bytes, const struct EA_INFO **info)
 {
        int err;
+       struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct ATTR_LIST_ENTRY *le = NULL;
        struct ATTRIB *attr_info, *attr_ea;
        void *ea_p;
@@ -102,10 +100,10 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
 
        /* Check Ea limit. */
        size = le32_to_cpu((*info)->size);
-       if (size > ni->mi.sbi->ea_max_size)
+       if (size > sbi->ea_max_size)
                return -EFBIG;
 
-       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
+       if (attr_size(attr_ea) > sbi->ea_max_size)
                return -EFBIG;
 
        /* Allocate memory for packed Ea. */
@@ -113,15 +111,16 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
        if (!ea_p)
                return -ENOMEM;
 
-       if (attr_ea->non_res) {
+       if (!size) {
+               ;
+       } else if (attr_ea->non_res) {
                struct runs_tree run;
 
                run_init(&run);
 
                err = attr_load_runs(attr_ea, ni, &run, NULL);
                if (!err)
-                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
-                                              NULL);
+                       err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
                run_close(&run);
 
                if (err)
@@ -260,7 +259,7 @@ out:
 
 static noinline int ntfs_set_ea(struct inode *inode, const char *name,
                                size_t name_len, const void *value,
-                               size_t val_size, int flags, int locked)
+                               size_t val_size, int flags)
 {
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -279,8 +278,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
        u64 new_sz;
        void *p;
 
-       if (!locked)
-               ni_lock(ni);
+       ni_lock(ni);
 
        run_init(&ea_run);
 
@@ -370,21 +368,22 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
        new_ea->name[name_len] = 0;
        memcpy(new_ea->name + name_len + 1, value, val_size);
        new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
-
-       /* Should fit into 16 bits. */
-       if (new_pack > 0xffff) {
-               err = -EFBIG; // -EINVAL?
-               goto out;
-       }
        ea_info.size_pack = cpu_to_le16(new_pack);
-
        /* New size of ATTR_EA. */
        size += add;
-       if (size > sbi->ea_max_size) {
+       ea_info.size = cpu_to_le32(size);
+
+       /*
+        * 1. Check ea_info.size_pack for overflow.
+        * 2. New attribute size must fit the value from $AttrDef
+        */
+       if (new_pack > 0xffff || size > sbi->ea_max_size) {
+               ntfs_inode_warn(
+                       inode,
+                       "The size of extended attributes must not exceed 64KiB");
                err = -EFBIG; // -EINVAL?
                goto out;
        }
-       ea_info.size = cpu_to_le32(size);
 
 update_ea:
 
@@ -444,7 +443,7 @@ update_ea:
                /* Delete xattr, ATTR_EA */
                ni_remove_attr_le(ni, attr, mi, le);
        } else if (attr->non_res) {
-               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
+               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
                if (err)
                        goto out;
        } else {
@@ -468,8 +467,7 @@ update_ea:
        mark_inode_dirty(&ni->vfs_inode);
 
 out:
-       if (!locked)
-               ni_unlock(ni);
+       ni_unlock(ni);
 
        run_close(&ea_run);
        kfree(ea_all);
@@ -478,12 +476,6 @@ out:
 }
 
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static inline void ntfs_posix_acl_release(struct posix_acl *acl)
-{
-       if (acl && refcount_dec_and_test(&acl->a_refcount))
-               kfree(acl);
-}
-
 static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
                                         struct inode *inode, int type,
                                         int locked)
@@ -521,12 +513,15 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
        /* Translate extended attribute to acl. */
        if (err >= 0) {
                acl = posix_acl_from_xattr(mnt_userns, buf, err);
-               if (!IS_ERR(acl))
-                       set_cached_acl(inode, type, acl);
+       } else if (err == -ENODATA) {
+               acl = NULL;
        } else {
-               acl = err == -ENODATA ? NULL : ERR_PTR(err);
+               acl = ERR_PTR(err);
        }
 
+       if (!IS_ERR(acl))
+               set_cached_acl(inode, type, acl);
+
        __putname(buf);
 
        return acl;
@@ -546,12 +541,13 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
 
 static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                                    struct inode *inode, struct posix_acl *acl,
-                                   int type, int locked)
+                                   int type)
 {
        const char *name;
        size_t size, name_len;
        void *value = NULL;
        int err = 0;
+       int flags;
 
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
@@ -561,22 +557,15 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                if (acl) {
                        umode_t mode = inode->i_mode;
 
-                       err = posix_acl_equiv_mode(acl, &mode);
-                       if (err < 0)
-                               return err;
+                       err = posix_acl_update_mode(mnt_userns, inode, &mode,
+                                                   &acl);
+                       if (err)
+                               goto out;
 
                        if (inode->i_mode != mode) {
                                inode->i_mode = mode;
                                mark_inode_dirty(inode);
                        }
-
-                       if (!err) {
-                               /*
-                                * ACL can be exactly represented in the
-                                * traditional file mode permission bits.
-                                */
-                               acl = NULL;
-                       }
                }
                name = XATTR_NAME_POSIX_ACL_ACCESS;
                name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -594,20 +583,24 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
        }
 
        if (!acl) {
+               /* Remove xattr if it can be presented via mode. */
                size = 0;
                value = NULL;
+               flags = XATTR_REPLACE;
        } else {
                size = posix_acl_xattr_size(acl->a_count);
                value = kmalloc(size, GFP_NOFS);
                if (!value)
                        return -ENOMEM;
-
                err = posix_acl_to_xattr(mnt_userns, acl, value, size);
                if (err < 0)
                        goto out;
+               flags = 0;
        }
 
-       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+       if (err == -ENODATA && !size)
+               err = 0; /* Removing a nonexistent xattr. */
        if (!err)
                set_cached_acl(inode, type, acl);
 
@@ -623,68 +616,7 @@ out:
 int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
                 struct posix_acl *acl, int type)
 {
-       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
-}
-
-static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
-                             struct inode *inode, int type, void *buffer,
-                             size_t size)
-{
-       struct posix_acl *acl;
-       int err;
-
-       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
-               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
-               return -EOPNOTSUPP;
-       }
-
-       acl = ntfs_get_acl(inode, type, false);
-       if (IS_ERR(acl))
-               return PTR_ERR(acl);
-
-       if (!acl)
-               return -ENODATA;
-
-       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
-       ntfs_posix_acl_release(acl);
-
-       return err;
-}
-
-static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
-                             struct inode *inode, int type, const void *value,
-                             size_t size)
-{
-       struct posix_acl *acl;
-       int err;
-
-       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
-               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
-               return -EOPNOTSUPP;
-       }
-
-       if (!inode_owner_or_capable(mnt_userns, inode))
-               return -EPERM;
-
-       if (!value) {
-               acl = NULL;
-       } else {
-               acl = posix_acl_from_xattr(mnt_userns, value, size);
-               if (IS_ERR(acl))
-                       return PTR_ERR(acl);
-
-               if (acl) {
-                       err = posix_acl_valid(mnt_userns, acl);
-                       if (err)
-                               goto release_and_out;
-               }
-       }
-
-       err = ntfs_set_acl(mnt_userns, inode, acl, type);
-
-release_and_out:
-       ntfs_posix_acl_release(acl);
-       return err;
+       return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
 }
 
 /*
@@ -698,54 +630,27 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
        struct posix_acl *default_acl, *acl;
        int err;
 
-       /*
-        * TODO: Refactoring lock.
-        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
-        */
-       inode->i_default_acl = NULL;
-
-       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
-
-       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
-               inode->i_mode &= ~current_umask();
-               err = 0;
-               goto out;
-       }
-
-       if (IS_ERR(default_acl)) {
-               err = PTR_ERR(default_acl);
-               goto out;
-       }
-
-       acl = default_acl;
-       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
-       if (err < 0)
-               goto out1;
-       if (!err) {
-               posix_acl_release(acl);
-               acl = NULL;
-       }
+       err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+       if (err)
+               return err;
 
-       if (!S_ISDIR(inode->i_mode)) {
+       if (default_acl) {
+               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
+                                     ACL_TYPE_DEFAULT);
                posix_acl_release(default_acl);
-               default_acl = NULL;
+       } else {
+               inode->i_default_acl = NULL;
        }
 
-       if (default_acl)
-               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
-                                     ACL_TYPE_DEFAULT, 1);
-
        if (!acl)
                inode->i_acl = NULL;
-       else if (!err)
-               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
-                                     1);
-
-       posix_acl_release(acl);
-out1:
-       posix_acl_release(default_acl);
+       else {
+               if (!err)
+                       err = ntfs_set_acl_ex(mnt_userns, inode, acl,
+                                             ACL_TYPE_ACCESS);
+               posix_acl_release(acl);
+       }
 
-out:
        return err;
 }
 #endif
@@ -772,7 +677,7 @@ int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
 int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
                    int mask)
 {
-       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
+       if (ntfs_sb(inode->i_sb)->options->noacsrules) {
                /* "No access rules" mode - Allow all changes. */
                return 0;
        }
@@ -880,23 +785,6 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
                goto out;
        }
 
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
-                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
-           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
-                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
-               /* TODO: init_user_ns? */
-               err = ntfs_xattr_get_acl(
-                       &init_user_ns, inode,
-                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
-                               ? ACL_TYPE_ACCESS
-                               : ACL_TYPE_DEFAULT,
-                       buffer, size);
-               goto out;
-       }
-#endif
        /* Deal with NTFS extended attribute. */
        err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
 
@@ -1009,24 +897,8 @@ set_new_fa:
                goto out;
        }
 
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
-                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
-           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
-                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
-               err = ntfs_xattr_set_acl(
-                       mnt_userns, inode,
-                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
-                               ? ACL_TYPE_ACCESS
-                               : ACL_TYPE_DEFAULT,
-                       value, size);
-               goto out;
-       }
-#endif
        /* Deal with NTFS extended attribute. */
-       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
 
 out:
        return err;
@@ -1042,28 +914,29 @@ int ntfs_save_wsl_perm(struct inode *inode)
        int err;
        __le32 value;
 
+       /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
        value = cpu_to_le32(i_uid_read(inode));
        err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        value = cpu_to_le32(i_gid_read(inode));
        err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        value = cpu_to_le32(inode->i_mode);
        err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                value = cpu_to_le32(inode->i_rdev);
                err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
-                                 sizeof(value), 0, 0);
+                                 sizeof(value), 0);
                if (err)
                        goto out;
        }
index f1cc8258d34a4ba7200cdc4dfe514d9ab671b9f2..5d9ae17bd443f209ce4098e1bc30ef737cfca9bd 100644 (file)
@@ -7045,7 +7045,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
 int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                                         struct buffer_head *di_bh)
 {
-       int ret, i, has_data, num_pages = 0;
+       int ret, has_data, num_pages = 0;
        int need_free = 0;
        u32 bit_off, num;
        handle_t *handle;
@@ -7054,26 +7054,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_alloc_context *data_ac = NULL;
-       struct page **pages = NULL;
-       loff_t end = osb->s_clustersize;
+       struct page *page = NULL;
        struct ocfs2_extent_tree et;
        int did_quota = 0;
 
        has_data = i_size_read(inode) ? 1 : 0;
 
        if (has_data) {
-               pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
-                               sizeof(struct page *), GFP_NOFS);
-               if (pages == NULL) {
-                       ret = -ENOMEM;
-                       mlog_errno(ret);
-                       return ret;
-               }
-
                ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
                if (ret) {
                        mlog_errno(ret);
-                       goto free_pages;
+                       goto out;
                }
        }
 
@@ -7093,7 +7084,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        }
 
        if (has_data) {
-               unsigned int page_end;
+               unsigned int page_end = min_t(unsigned, PAGE_SIZE,
+                                                       osb->s_clustersize);
                u64 phys;
 
                ret = dquot_alloc_space_nodirty(inode,
@@ -7117,15 +7109,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                 */
                block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
 
-               /*
-                * Non sparse file systems zero on extend, so no need
-                * to do that now.
-                */
-               if (!ocfs2_sparse_alloc(osb) &&
-                   PAGE_SIZE < osb->s_clustersize)
-                       end = PAGE_SIZE;
-
-               ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
+               ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
+                                          &num_pages);
                if (ret) {
                        mlog_errno(ret);
                        need_free = 1;
@@ -7136,20 +7121,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                 * This should populate the 1st page for us and mark
                 * it up to date.
                 */
-               ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+               ret = ocfs2_read_inline_data(inode, page, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        need_free = 1;
                        goto out_unlock;
                }
 
-               page_end = PAGE_SIZE;
-               if (PAGE_SIZE > osb->s_clustersize)
-                       page_end = osb->s_clustersize;
-
-               for (i = 0; i < num_pages; i++)
-                       ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
-                                                pages[i], i > 0, &phys);
+               ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
+                                        &phys);
        }
 
        spin_lock(&oi->ip_lock);
@@ -7180,8 +7160,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        }
 
 out_unlock:
-       if (pages)
-               ocfs2_unlock_and_free_pages(pages, num_pages);
+       if (page)
+               ocfs2_unlock_and_free_pages(&page, num_pages);
 
 out_commit:
        if (ret < 0 && did_quota)
@@ -7205,8 +7185,6 @@ out_commit:
 out:
        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
-free_pages:
-       kfree(pages);
        return ret;
 }
 
index c86bd4e60e207dc5765164b6b9bd5cbf7b5c18c9..5c914ce9b3ac95636296e308cc2f61400f41ef55 100644 (file)
@@ -2167,11 +2167,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
        }
 
        if (ocfs2_clusterinfo_valid(osb)) {
+               /*
+                * ci_stack and ci_cluster in ocfs2_cluster_info may not be null
+                * terminated, so make sure no overflow happens here by using
+                * memcpy. Destination strings will always be null terminated
+                * because osb is allocated using kzalloc.
+                */
                osb->osb_stackflags =
                        OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
-               strlcpy(osb->osb_cluster_stack,
+               memcpy(osb->osb_cluster_stack,
                       OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
-                      OCFS2_STACK_LABEL_LEN + 1);
+                      OCFS2_STACK_LABEL_LEN);
                if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
                        mlog(ML_ERROR,
                             "couldn't mount because of an invalid "
@@ -2180,9 +2186,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
                        status = -EINVAL;
                        goto bail;
                }
-               strlcpy(osb->osb_cluster_name,
+               memcpy(osb->osb_cluster_name,
                        OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
-                       OCFS2_CLUSTER_NAME_LEN + 1);
+                       OCFS2_CLUSTER_NAME_LEN);
        } else {
                /* The empty string is identical with classic tools that
                 * don't know about s_cluster_info. */
index 1fefb2b8960e94a8c9a7a1b81a5e70b83af4520f..93c7c267de934de9b10040151d1646887b3596bf 100644 (file)
@@ -1219,9 +1219,13 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
                                goto out_dput;
                }
        } else {
-               if (!d_is_negative(newdentry) &&
-                   (!new_opaque || !ovl_is_whiteout(newdentry)))
-                       goto out_dput;
+               if (!d_is_negative(newdentry)) {
+                       if (!new_opaque || !ovl_is_whiteout(newdentry))
+                               goto out_dput;
+               } else {
+                       if (flags & RENAME_EXCHANGE)
+                               goto out_dput;
+               }
        }
 
        if (olddentry == trap)
index d081faa55e830e3e66469475e5e236d107c8b681..c88ac571593dc1924e9ea0fbf3ab5aeaa588a69f 100644 (file)
@@ -296,6 +296,12 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        if (ret)
                return ret;
 
+       ret = -EINVAL;
+       if (iocb->ki_flags & IOCB_DIRECT &&
+           (!real.file->f_mapping->a_ops ||
+            !real.file->f_mapping->a_ops->direct_IO))
+               goto out_fdput;
+
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
        if (is_sync_kiocb(iocb)) {
                ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
@@ -320,7 +326,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 out:
        revert_creds(old_cred);
        ovl_file_accessed(file);
-
+out_fdput:
        fdput(real);
 
        return ret;
@@ -349,6 +355,12 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
        if (ret)
                goto out_unlock;
 
+       ret = -EINVAL;
+       if (iocb->ki_flags & IOCB_DIRECT &&
+           (!real.file->f_mapping->a_ops ||
+            !real.file->f_mapping->a_ops->direct_IO))
+               goto out_fdput;
+
        if (!ovl_should_sync(OVL_FS(inode->i_sb)))
                ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
 
@@ -384,6 +396,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
        }
 out:
        revert_creds(old_cred);
+out_fdput:
        fdput(real);
 
 out_unlock:
index 003f0d31743eb3cd95de8c8da21a4d0d731daf73..22bf14ab2d163b189ba3f9a3fedc5fe53ec7bccb 100644 (file)
@@ -1827,9 +1827,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
        if (mode_wp && mode_dontwake)
                return -EINVAL;
 
-       ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
-                                 uffdio_wp.range.len, mode_wp,
-                                 &ctx->mmap_changing);
+       if (mmget_not_zero(ctx->mm)) {
+               ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
+                                         uffdio_wp.range.len, mode_wp,
+                                         &ctx->mmap_changing);
+               mmput(ctx->mm);
+       } else {
+               return -ESRCH;
+       }
+
        if (ret)
                return ret;
 
index 4f5e59f062846afb243333a08b6751616e8d03d0..37dd3fe5b1e981a127d9b718d65f1bb625dc470a 100644 (file)
 
 #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
 
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
 
 static int follow_symlinks;
 module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
 
 static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
 {
-       unsigned char *options = data;
-
-       if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
-                      options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
-                      options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
-                      options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+       if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
                vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
                return -EINVAL;
        }
index 77e159a0346b17078701dd1b3bf2cc5416f9e4e4..60a4372aa4d75f5157865bbf525bfb1a2cd4bb7e 100644 (file)
@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
         * (level 0) and ascending to the root node (level 'num_levels - 1').
         * Then at the end (level 'num_levels'), calculate the root hash.
         */
-       blocks = (inode->i_size + params->block_size - 1) >>
+       blocks = ((u64)inode->i_size + params->block_size - 1) >>
                 params->log_blocksize;
        for (level = 0; level <= params->num_levels; level++) {
                err = build_merkle_tree_level(filp, level, blocks, params,
index 60ff8af7219feafe90b06d5c8c5b0df301498186..92df87f5fa3881abec4f18d20686cc991558ced4 100644 (file)
@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
         */
 
        /* Compute number of levels and the number of blocks in each level */
-       blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+       blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
        pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
        while (blocks > 1) {
                if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
index fb172a03a7537e02495e1ab8c644c7a92f2a7b33..20ecb004f5a4ac7eea702fdb6591e8081b523100 100644 (file)
@@ -22,9 +22,14 @@ typedef __builtin_va_list va_list;
 #define va_arg(v, l)            __builtin_va_arg(v, l)
 #define va_copy(d, s)           __builtin_va_copy(d, s)
 #else
+#ifdef __KERNEL__
 #include <linux/stdarg.h>
-#endif
-#endif
+#else
+/* Used to build acpi tools */
+#include <stdarg.h>
+#endif /* __KERNEL__ */
+#endif /* ACPI_USE_BUILTIN_STDARG */
+#endif /* ! va_arg */
 
 #define ACPI_INLINE             __inline__
 
index cc7338f9e0d1bc90319076f30b8f0f2457681887..7ce93aaf69f8dbd45d054ff0b708d802cf786bfb 100644 (file)
@@ -957,7 +957,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 
 #ifndef iounmap
 #define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
 {
 }
 #endif
index 24b40e5c160b2b43bdbe58bfedfc58ba2855aeff..018e776a34b9a90c06a0b2540b5aa0b0ef7fa36b 100644 (file)
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
  * and is automatically cleaned up after the test case concludes. See &struct
  * kunit_resource for more information.
  */
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
 
 /**
  * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
  *
  * See kcalloc() and kunit_kmalloc_array() for more information.
  */
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
 {
-       return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+       return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
 }
 
 void kunit_cleanup(struct kunit *test);
index 864b9997efb20d55c5df3fd2b544575134e070e9..90f21898aad838660e993c3e9fce553ea4b7af56 100644 (file)
@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
 #else
 struct kvm_pmu {
 };
@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return 0;
 }
 
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
-
 #endif
 
 #endif
index 7d1cabe152622f7b42bcc691af539f4e0dd85989..63ccb525219022e17e39771c669de161ed54666a 100644 (file)
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
  * from register 0 to 3 on return from the SMC instruction.  An optional
  * quirk structure provides vendor specific behavior.
  */
+#ifdef CONFIG_HAVE_ARM_SMCCC
 asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                        unsigned long a2, unsigned long a3, unsigned long a4,
                        unsigned long a5, unsigned long a6, unsigned long a7,
                        struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+                       unsigned long a2, unsigned long a3, unsigned long a4,
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+       *res = (struct arm_smccc_res){};
+}
+#endif
 
 /**
  * __arm_smccc_hvc() - make HVC calls
index f4c16f19f83e3edcf11abb4541a538ef468d395a..020a7d5bf4701afbf1e504476ecd41323bd1fa6a 100644 (file)
@@ -578,11 +578,12 @@ struct btf_func_model {
  * programs only. Should not be used with normal calls and indirect calls.
  */
 #define BPF_TRAMP_F_SKIP_FRAME         BIT(2)
-
 /* Store IP address of the caller on the trampoline stack,
  * so it's available for trampoline's programs.
  */
 #define BPF_TRAMP_F_IP_ARG             BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET     BIT(4)
 
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
index 832d8a74fa5960becb611c7e7adaec760c88c495..991911048857a8710a4ec3d2bd1c9f0faeccb6ee 100644 (file)
@@ -72,6 +72,8 @@ enum cpuhp_state {
        CPUHP_SLUB_DEAD,
        CPUHP_DEBUG_OBJ_DEAD,
        CPUHP_MM_WRITEBACK_DEAD,
+       /* Must be after CPUHP_MM_VMSTAT_DEAD */
+       CPUHP_MM_DEMOTION_DEAD,
        CPUHP_MM_VMSTAT_DEAD,
        CPUHP_SOFTIRQ_DEAD,
        CPUHP_NET_MVNETA_DEAD,
@@ -240,6 +242,8 @@ enum cpuhp_state {
        CPUHP_AP_BASE_CACHEINFO_ONLINE,
        CPUHP_AP_ONLINE_DYN,
        CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,
+       /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+       CPUHP_AP_MM_DEMOTION_ONLINE,
        CPUHP_AP_X86_HPET_ONLINE,
        CPUHP_AP_X86_KVM_CLK_ONLINE,
        CPUHP_AP_DTPM_CPU_ONLINE,
index 5d4d07a9e1ed15e67c249cc402ba1908ef412b06..1e7399fc69c0a1e5304b892bb8accc15567cdfd3 100644 (file)
@@ -996,14 +996,15 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
  * cpumask; Typically used by bin_attribute to export cpumask bitmask
  * ABI.
  *
- * Returns the length of how many bytes have been copied.
+ * Returns the length of how many bytes have been copied, excluding
+ * terminating '\0'.
  */
 static inline ssize_t
 cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
                loff_t off, size_t count)
 {
        return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
-                                  nr_cpu_ids, off, count);
+                                  nr_cpu_ids, off, count) - 1;
 }
 
 /**
@@ -1018,7 +1019,7 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
                loff_t off, size_t count)
 {
        return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
-                                  nr_cpu_ids, off, count);
+                                  nr_cpu_ids, off, count) - 1;
 }
 
 #if NR_CPUS <= BITS_PER_LONG
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644 (file)
index 0000000..8c3d45e
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE       0
+#define MV88E6XXX_VID_BRIDGED          (VLAN_N_VID - 1)
+
+#endif
index 435777a0073c76d3f760b0b43b1b5aaf7ea7cdbd..8ae999f587c48eb74b7c018a479a4d6eb836b2fe 100644 (file)
@@ -5,7 +5,28 @@
 #ifndef _NET_DSA_TAG_OCELOT_H
 #define _NET_DSA_TAG_OCELOT_H
 
+#include <linux/kthread.h>
 #include <linux/packing.h>
+#include <linux/skbuff.h>
+
+struct ocelot_skb_cb {
+       struct sk_buff *clone;
+       unsigned int ptp_class; /* valid only for clones */
+       u8 ptp_cmd;
+       u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+       ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C                 0
+#define IFH_TAG_TYPE_S                 1
+
+#define IFH_REW_OP_NOOP                        0x0
+#define IFH_REW_OP_DSCP                        0x1
+#define IFH_REW_OP_ONE_STEP_PTP                0x2
+#define IFH_REW_OP_TWO_STEP_PTP                0x3
+#define IFH_REW_OP_ORIGIN_PTP          0x5
 
 #define OCELOT_TAG_LEN                 16
 #define OCELOT_SHORT_PREFIX_LEN                4
  *         +------+------+------+------+------+------+------+------+
  */
 
+struct felix_deferred_xmit_work {
+       struct dsa_port *dp;
+       struct sk_buff *skb;
+       struct kthread_work work;
+};
+
+struct felix_port {
+       void (*xmit_work_fn)(struct kthread_work *work);
+       struct kthread_worker *xmit_worker;
+};
+
 static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
 {
        packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -215,4 +247,21 @@ static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
        packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
 }
 
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+       u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+       u32 rew_op = 0;
+
+       if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+               rew_op = ptp_cmd;
+               rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+       } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+               rew_op = ptp_cmd;
+       }
+
+       return rew_op;
+}
+
 #endif
index 171106202fe5bef538778f5bd681223e6c430421..9e07079528a53fe39e3e4974998785cefcecd47d 100644 (file)
@@ -48,6 +48,10 @@ struct sja1105_tagger_data {
        spinlock_t meta_lock;
        unsigned long state;
        u8 ts_id;
+       /* Used on SJA1110 where meta frames are generated only for
+        * 2-step TX timestamps
+        */
+       struct sk_buff_head skb_txtstamp_queue;
 };
 
 struct sja1105_skb_cb {
@@ -69,42 +73,24 @@ struct sja1105_port {
        bool hwts_tx_en;
 };
 
-enum sja1110_meta_tstamp {
-       SJA1110_META_TSTAMP_TX = 0,
-       SJA1110_META_TSTAMP_RX = 1,
-};
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
-                                enum sja1110_meta_tstamp dir, u64 tstamp);
-
-#else
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS                        8
 
-static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
-                                              u8 ts_id, enum sja1110_meta_tstamp dir,
-                                              u64 tstamp)
+static inline s64 ns_to_sja1105_ticks(s64 ns)
 {
+       return ns / SJA1105_TICK_NS;
 }
 
-#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105)
-
-extern const struct dsa_switch_ops sja1105_switch_ops;
-
-static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
 {
-       return dp->ds->ops == &sja1105_switch_ops;
+       return ticks * SJA1105_TICK_NS;
 }
 
-#else
-
 static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
 {
-       return false;
+       return true;
 }
 
-#endif
-
 #endif /* _NET_DSA_SJA1105_H */
index 2aaa15779d50bb723b6c5a10922798c0a3bf3395..957ebec35aad01e73a70f464925b5af27becbf87 100644 (file)
@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
 #endif
 }
 
-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
 /*
  * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
  * extra segments containing the gate DSO contents.  Dumping its
index 928c411bd50963278407ad317b22db4eaaed0895..c58d5045148547a136a523669b0f1007342ea56d 100644 (file)
@@ -308,7 +308,7 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
  */
 static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
 {
-       ether_addr_copy(dev->dev_addr, addr);
+       __dev_addr_set(dev, addr, ETH_ALEN);
 }
 
 /**
index 59828516ebaf1932a098eb9988f95fe8d17da5fb..9f4ad719bfe3f8b5f49361db3750359bf954dfdc 100644 (file)
@@ -22,10 +22,15 @@ struct device;
  * LINKS_ADDED:        The fwnode has already be parsed to add fwnode links.
  * NOT_DEVICE: The fwnode will never be populated as a struct device.
  * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ *                          driver needs its child devices to be bound with
+ *                          their respective drivers as soon as they are
+ *                          added.
  */
-#define FWNODE_FLAG_LINKS_ADDED                BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE         BIT(1)
-#define FWNODE_FLAG_INITIALIZED                BIT(2)
+#define FWNODE_FLAG_LINKS_ADDED                        BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE                 BIT(1)
+#define FWNODE_FLAG_INITIALIZED                        BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD   BIT(3)
 
 struct fwnode_handle {
        struct fwnode_handle *secondary;
index c68d83c87f83fc9c0e3e946c5ccbd895e4d97724..0f5315c2b5a346eb716a77d0ba03e87138f363b3 100644 (file)
@@ -149,6 +149,7 @@ struct gendisk {
        unsigned long state;
 #define GD_NEED_PART_SCAN              0
 #define GD_READ_ONLY                   1
+#define GD_DEAD                                2
 
        struct mutex open_mutex;        /* open/close mutex */
        unsigned open_partitions;       /* number of open partitions */
index 041ca7f15ea4514f750f3f200c82d3fa377f94a7..0f18df7fe87491c8880e20c4934798bd6efc36fc 100644 (file)
@@ -608,7 +608,6 @@ struct kvm {
        unsigned long mmu_notifier_range_start;
        unsigned long mmu_notifier_range_end;
 #endif
-       long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
@@ -721,11 +720,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
        return NULL;
 }
 
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
-       return vcpu->vcpu_idx;
-}
-
 #define kvm_for_each_memslot(memslot, slots)                           \
        for (memslot = &slots->memslots[0];                             \
             memslot < slots->memslots + slots->used_slots; memslot++)  \
index 7efc0a7c14c9db85bbfea80faaf531d49639e715..182c606adb060ff962f6f34e4bd76afb9ab63b50 100644 (file)
@@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
 #define register_hotmemory_notifier(nb)                register_memory_notifier(nb)
 #define unregister_hotmemory_notifier(nb)      unregister_memory_notifier(nb)
 #else
-#define hotplug_memory_notifier(fn, pri)       ({ 0; })
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+       return 0;
+}
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotmemory_notifier(nb)    ({ (void)(nb); 0; })
 #define unregister_hotmemory_notifier(nb)  ({ (void)(nb); })
index e23417424373fc647d89e3df7d8ea2b51684ca43..f17d2101af7a0e28001946aea1363a0e6849638a 100644 (file)
@@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
index f3638d09ba776468ec0f84439d0116e791993d18..993204a6c1a1376e47490ab0f72e0254921f85d3 100644 (file)
@@ -9475,16 +9475,22 @@ struct mlx5_ifc_pcmr_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
        u8         reserved_at_10[0x10];
+
        u8         entropy_force_cap[0x1];
        u8         entropy_calc_cap[0x1];
        u8         entropy_gre_calc_cap[0x1];
-       u8         reserved_at_23[0x1b];
+       u8         reserved_at_23[0xf];
+       u8         rx_ts_over_crc_cap[0x1];
+       u8         reserved_at_33[0xb];
        u8         fcs_cap[0x1];
        u8         reserved_at_3f[0x1];
+
        u8         entropy_force[0x1];
        u8         entropy_calc[0x1];
        u8         entropy_gre_calc[0x1];
-       u8         reserved_at_43[0x1b];
+       u8         reserved_at_43[0xf];
+       u8         rx_ts_over_crc[0x1];
+       u8         reserved_at_53[0xb];
        u8         fcs_chk[0x1];
        u8         reserved_at_5f[0x1];
 };
index 505480217cf1a9b4f779b454d42bc78c464e189b..2512e2f9cd4e7964fe08c42052d4313f5cfc6679 100644 (file)
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)   do { } while(0)
+#endif
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
index fe156a8170aa39ab4236a22174aa73ac0959d93b..9b60bb89d86ab817737139c18b8b6186d08de875 100644 (file)
@@ -683,7 +683,9 @@ struct perf_event {
        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
-        * context time as it was when the event was last scheduled in.
+        * context time as it was when the event was last scheduled in,
+        * or when ctx_sched_in failed to schedule the event because we
+        * run out of PMC.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
index 43b5ce139c375e9af1527b462b1cd04a3992dbc1..878e572a78bf60ce2870809b6919f741e05c01d7 100644 (file)
@@ -48,6 +48,8 @@ struct omap_usb_config {
        u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
 
        int (*ocpi_enable)(void);
+
+       void (*lb_reset)(void);
 };
 
 #endif /* __LINUX_USB_OMAP1_H */
index c0475d1c98850b1058c83a728bc14c3ff504c5c6..81cad9e1e4120f5db1146cb65f4e6e7a25ceea84 100644 (file)
@@ -61,7 +61,6 @@ enum qcom_scm_ice_cipher {
 #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
 #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
 
-#if IS_ENABLED(CONFIG_QCOM_SCM)
 extern bool qcom_scm_is_available(void);
 
 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
@@ -115,74 +114,4 @@ extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
 extern int qcom_scm_lmh_profile_change(u32 profile_id);
 extern bool qcom_scm_lmh_dcvsh_available(void);
 
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
-               { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
-               size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
-               phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
-               { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-               { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
-               { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-               { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
-                                                u32 cp_nonpixel_start,
-                                                u32 cp_nonpixel_size)
-               { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-               unsigned int *src, const struct qcom_scm_vmperm *newvm,
-               unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
-               u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
-               u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
-                                      enum qcom_scm_ice_cipher cipher,
-                                      u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-               u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
-                                    u64 limit_node, u32 node_id, u64 version)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
-
-static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; }
-#endif
 #endif
index 39039ce8ac4c1774a8c1aca1d0d7bc5eb6e7485f..c1a927ddec646c60d5d22b212852116eecb08327 100644 (file)
@@ -1720,7 +1720,7 @@ extern struct pid *cad_pid;
 #define tsk_used_math(p)                       ((p)->flags & PF_USED_MATH)
 #define used_math()                            tsk_used_math(current)
 
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
 {
 #ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
index 21c3771e6a56b0cc4304370ebf9673273a0cf7dc..988528b5da438fa2e96f4166446361ee58958e02 100644 (file)
@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
        mapping = (struct address_space *)
                ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 
-       if (mapping != page->mapping)
+       if (!mapping || mapping != page->mapping)
                return false;
 
        return mapping->a_ops == &secretmem_aops;
index a9f9c5714e6505882b19000f6d44f1e705e3daef..fe95f0922526624201eebf7644868380ed9d378b 100644 (file)
  *  When function tracing occurs, the following steps are made:
  *   If arch does not support a ftrace feature:
  *    call internal function (uses INTERNAL bits) which calls...
- *   If callback is registered to the "global" list, the list
- *    function is called and recursion checks the GLOBAL bits.
- *    then this function calls...
  *   The function callback, which can use the FTRACE bits to
  *    check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
  */
 enum {
        /* Function recursion bits */
@@ -40,12 +25,14 @@ enum {
        TRACE_FTRACE_NMI_BIT,
        TRACE_FTRACE_IRQ_BIT,
        TRACE_FTRACE_SIRQ_BIT,
+       TRACE_FTRACE_TRANSITION_BIT,
 
-       /* INTERNAL_BITs must be greater than FTRACE_BITs */
+       /* Internal use recursion bits */
        TRACE_INTERNAL_BIT,
        TRACE_INTERNAL_NMI_BIT,
        TRACE_INTERNAL_IRQ_BIT,
        TRACE_INTERNAL_SIRQ_BIT,
+       TRACE_INTERNAL_TRANSITION_BIT,
 
        TRACE_BRANCH_BIT,
 /*
@@ -86,12 +73,6 @@ enum {
         */
        TRACE_GRAPH_NOTRACE_BIT,
 
-       /*
-        * When transitioning between context, the preempt_count() may
-        * not be correct. Allow for a single recursion to cover this case.
-        */
-       TRACE_TRANSITION_BIT,
-
        /* Used to prevent recursion recording from recursing. */
        TRACE_RECORD_RECURSION_BIT,
 };
@@ -113,12 +94,10 @@ enum {
 #define TRACE_CONTEXT_BITS     4
 
 #define TRACE_FTRACE_START     TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX       ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 
 #define TRACE_LIST_START       TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX         ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 
-#define TRACE_CONTEXT_MASK     TRACE_LIST_MAX
+#define TRACE_CONTEXT_MASK     ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 
 /*
  * Used for setting context
@@ -132,6 +111,7 @@ enum {
        TRACE_CTX_IRQ,
        TRACE_CTX_SOFTIRQ,
        TRACE_CTX_NORMAL,
+       TRACE_CTX_TRANSITION,
 };
 
 static __always_inline int trace_get_context_bit(void)
@@ -160,45 +140,34 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
 #endif
 
 static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
-                                                       int start, int max)
+                                                       int start)
 {
        unsigned int val = READ_ONCE(current->trace_recursion);
        int bit;
 
-       /* A previous recursion check was made */
-       if ((val & TRACE_CONTEXT_MASK) > max)
-               return 0;
-
        bit = trace_get_context_bit() + start;
        if (unlikely(val & (1 << bit))) {
                /*
                 * It could be that preempt_count has not been updated during
                 * a switch between contexts. Allow for a single recursion.
                 */
-               bit = TRACE_TRANSITION_BIT;
+               bit = TRACE_CTX_TRANSITION + start;
                if (val & (1 << bit)) {
                        do_ftrace_record_recursion(ip, pip);
                        return -1;
                }
-       } else {
-               /* Normal check passed, clear the transition to allow it again */
-               val &= ~(1 << TRACE_TRANSITION_BIT);
        }
 
        val |= 1 << bit;
        current->trace_recursion = val;
        barrier();
 
-       return bit + 1;
+       return bit;
 }
 
 static __always_inline void trace_clear_recursion(int bit)
 {
-       if (!bit)
-               return;
-
        barrier();
-       bit--;
        trace_recursion_clear(bit);
 }
 
@@ -214,7 +183,7 @@ static __always_inline void trace_clear_recursion(int bit)
 static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
                                                         unsigned long parent_ip)
 {
-       return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+       return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
 }
 
 /**
index eb70cabe6e7f2f6362bde868a21dbf257701afe9..33a4240e6a6f1789f8a3153a264030e62f4645d7 100644 (file)
@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t
 
 long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
 bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
 bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
 
 static inline void set_rlimit_ucount_max(struct user_namespace *ns,
index 2ebef6b1a3d6cd8f429be752b14c110acff63f90..74d3c1efd9bb5c8d8d6507f5f9a0ff607d893789 100644 (file)
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-                                        unsigned int flags,
-                                        int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
index 21c5386d4a6dc9108b9fd17aeb4f1e6bc365c074..ab5348e57db1a627cbce2dededb2e9b754d1f2cd 100644 (file)
@@ -597,5 +597,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
                     u8 rt_family, unsigned char *flags, bool skip_oif);
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
-                   int nh_weight, u8 rt_family);
+                   int nh_weight, u8 rt_family, u32 nh_tclassid);
 #endif  /* _NET_FIB_H */
index af0fc13cea3499e60e910adfcd98033d258ec937..618d1f427cb2761cee907d3615d19df39a6b50de 100644 (file)
@@ -2818,13 +2818,13 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
  * when they are able to replace in-use PTK keys according to the following
  * requirements:
- * 1) They do not hand over frames decrypted with the old key to
-      mac80211 once the call to set_key() with command %DISABLE_KEY has been
-      completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+      once the call to set_key() with command %DISABLE_KEY has been completed,
    2) either drop or continue to use the old key for any outgoing frames queued
       at the time of the key deletion (including re-transmits),
    3) never send out a frame queued prior to the set_key() %SET_KEY command
-      encrypted with the new key and
+      encrypted with the new key when also needing
+      @IEEE80211_KEY_FLAG_GENERATE_IV and
    4) never send out a frame unencrypted when it should be encrypted.
    Mac80211 will not queue any new frames for a deleted key to the driver.
  */
index a824d47c3c6d866a31db6d10b7822abe30496edc..ffd2c23bd76d56a58b389abd46a69979405ec132 100644 (file)
@@ -54,7 +54,7 @@ struct mctp_sock {
        struct sock     sk;
 
        /* bind() params */
-       int             bind_net;
+       unsigned int    bind_net;
        mctp_eid_t      bind_addr;
        __u8            bind_type;
 
index 0fd8a4159662d094fd5f02c34c856a9bb08d2813..ceadf8ba25a446b648f21adb71361609ff58beee 100644 (file)
@@ -17,7 +17,6 @@ struct inet_frags_ctl;
 struct nft_ct_frag6_pernet {
        struct ctl_table_header *nf_frag_frags_hdr;
        struct fqdir    *fqdir;
-       unsigned int users;
 };
 
 #endif /* _NF_DEFRAG_IPV6_H */
index 148f5d8ee5ab35fc002060d854ce2a89e2ef9f21..a16171c5fd9ebc8c9cb214e700584ca34eb1a016 100644 (file)
@@ -1202,7 +1202,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
 
 void nft_obj_notify(struct net *net, const struct nft_table *table,
                    struct nft_object *obj, u32 portid, u32 seq,
-                   int event, int family, int report, gfp_t gfp);
+                   int event, u16 flags, int family, int report, gfp_t gfp);
 
 /**
  *     struct nft_object_type - stateful object type
index 986a2a9cfdfa444fe8c2087e1a72507baf469a81..b593f95e99913e9946da7bd2d3b28462350217ee 100644 (file)
@@ -27,5 +27,11 @@ struct netns_nf {
 #if IS_ENABLED(CONFIG_DECNET)
        struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
 #endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+       unsigned int defrag_ipv4_users;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+       unsigned int defrag_ipv6_users;
+#endif
 };
 #endif
index 10e1777877e6aaf818c7fba442e5dd297c925bd9..28085b995ddcfe2269f1f8524ea747a881d835a4 100644 (file)
@@ -325,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
                struct fib_nh_common *nhc = &nhi->fib_nhc;
                int weight = nhg->nh_entries[i].weight;
 
-               if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+               if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
                        return -EMSGSIZE;
        }
 
index 6d7b12cba0158f9a954949b1fc42ee418d4b56a7..bf79f3a890af263dcb52056a56bbdf751e28064f 100644 (file)
@@ -11,6 +11,7 @@
 #include <uapi/linux/pkt_sched.h>
 
 #define DEFAULT_TX_QUEUE_LEN   1000
+#define STAB_SIZE_LOG_MAX      30
 
 struct qdisc_walker {
        int     stop;
index 2eb6d7c2c9310db5f3bfdbae8cffdb69b314cf7a..f37c7a558d6dd7c5baf5f39d2fac309fde12da8e 100644 (file)
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
         * Verification Tag value does not match the receiver's own
         * tag value, the receiver shall silently discard the packet...
         */
-        if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
-                return 1;
+       if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+               return 0;
 
        chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
-       return 0;
+       return 1;
 }
 
 /* Check VTAG of the packet matches the sender's own tag and the T bit is
index c005c3c750e89b6d4d36d36ccfad392a7e733603..ea6fbc88c8f90f44f660bbca2cd2288e73039ec7 100644 (file)
@@ -307,6 +307,7 @@ struct bpf_local_storage;
   *    @sk_priority: %SO_PRIORITY setting
   *    @sk_type: socket type (%SOCK_STREAM, etc)
   *    @sk_protocol: which protocol this socket belongs in this network family
+  *    @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
   *    @sk_peer_pid: &struct pid for this socket's peer
   *    @sk_peer_cred: %SO_PEERCRED setting
   *    @sk_rcvlowat: %SO_RCVLOWAT setting
@@ -488,8 +489,10 @@ struct sock {
        u8                      sk_prefer_busy_poll;
        u16                     sk_busy_poll_budget;
 #endif
+       spinlock_t              sk_peer_lock;
        struct pid              *sk_peer_pid;
        const struct cred       *sk_peer_cred;
+
        long                    sk_rcvtimeo;
        ktime_t                 sk_stamp;
 #if BITS_PER_LONG==32
@@ -1623,7 +1626,36 @@ void release_sock(struct sock *sk);
                                SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)   spin_unlock(&((__sk)->sk_lock.slock))
 
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken:
+ *
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * return true if slow path is taken:
+ *
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+       /* The sk_lock has mutex_lock() semantics here. */
+       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+       return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+       mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+       return __lock_sock_fast(sk);
+}
 
 /**
  * unlock_sock_fast - complement of lock_sock_fast
index 3166dc15d7d634676ad30fdc1e57b7517087bb68..60c384569e9cd6e48fe09b4da5251f6b4c9d5060 100644 (file)
@@ -1576,6 +1576,7 @@ struct tcp_md5sig_key {
        u8                      keylen;
        u8                      family; /* AF_INET or AF_INET6 */
        u8                      prefixlen;
+       u8                      flags;
        union tcp_md5_addr      addr;
        int                     l3index; /* set if key added with L3 scope */
        u8                      key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1622,10 @@ struct tcp_md5sig_pool {
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index);
+                  int family, u8 prefixlen, int l3index, u8 flags);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk);
 
index 06706a9fd5b1c2d1a1e5dc4ad03ff5a5e32983ef..d7055b41982dfaa7f99189728c14b1cd0316dbc9 100644 (file)
 /* Source PGIDs, one per physical port */
 #define PGID_SRC                       80
 
-#define IFH_TAG_TYPE_C                 0
-#define IFH_TAG_TYPE_S                 1
-
-#define IFH_REW_OP_NOOP                        0x0
-#define IFH_REW_OP_DSCP                        0x1
-#define IFH_REW_OP_ONE_STEP_PTP                0x2
-#define IFH_REW_OP_TWO_STEP_PTP                0x3
-#define IFH_REW_OP_ORIGIN_PTP          0x5
-
 #define OCELOT_NUM_TC                  8
 
 #define OCELOT_SPEED_2500              0
@@ -603,10 +594,10 @@ struct ocelot_port {
        /* The VLAN ID that will be transmitted as untagged, on egress */
        struct ocelot_vlan              native_vlan;
 
+       unsigned int                    ptp_skbs_in_flight;
        u8                              ptp_cmd;
        struct sk_buff_head             tx_skbs;
        u8                              ts_id;
-       spinlock_t                      ts_id_lock;
 
        phy_interface_t                 phy_mode;
 
@@ -680,6 +671,9 @@ struct ocelot {
        struct ptp_clock                *ptp_clock;
        struct ptp_clock_info           ptp_info;
        struct hwtstamp_config          hwtstamp_config;
+       unsigned int                    ptp_skbs_in_flight;
+       /* Protects the 2-step TX timestamp ID logic */
+       spinlock_t                      ts_id_lock;
        /* Protects the PTP interface state */
        struct mutex                    ptp_lock;
        /* Protects the PTP clock */
@@ -692,15 +686,6 @@ struct ocelot_policer {
        u32 burst; /* bytes */
 };
 
-struct ocelot_skb_cb {
-       struct sk_buff *clone;
-       u8 ptp_cmd;
-       u8 ts_id;
-};
-
-#define OCELOT_SKB_CB(skb) \
-       ((struct ocelot_skb_cb *)((skb)->cb))
-
 #define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
 #define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
 #define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
@@ -752,8 +737,6 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
 void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
                              u32 val, u32 reg, u32 offset);
 
-#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB)
-
 /* Packet I/O */
 bool ocelot_can_inject(struct ocelot *ocelot, int grp);
 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
@@ -761,36 +744,6 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
 
-u32 ocelot_ptp_rew_op(struct sk_buff *skb);
-#else
-
-static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp)
-{
-       return false;
-}
-
-static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port,
-                                           int grp, u32 rew_op,
-                                           struct sk_buff *skb)
-{
-}
-
-static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp,
-                                       struct sk_buff **skb)
-{
-       return -EIO;
-}
-
-static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
-{
-}
-
-static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
-       return 0;
-}
-#endif
-
 /* Hardware initialization */
 int ocelot_regfields_init(struct ocelot *ocelot,
                          const struct reg_field *const regfields);
index ded497d72bdbbcad572be9636416b04bf24c8546..f085884b1fa27ffc14293deb5a3a7bb8083b5662 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/ptp_clock_kernel.h>
 #include <soc/mscc/ocelot.h>
 
+#define OCELOT_MAX_PTP_ID              63
+#define OCELOT_PTP_FIFO_SIZE           128
+
 #define PTP_PIN_CFG_RSZ                        0x20
 #define PTP_PIN_TOD_SEC_MSB_RSZ                PTP_PIN_CFG_RSZ
 #define PTP_PIN_TOD_SEC_LSB_RSZ                PTP_PIN_CFG_RSZ
index 25fd525aaf92844e75b7e709415a4fdfc530671f..4869ebbd438d9a66c584bce7573ccfa1cba25431 100644 (file)
@@ -694,7 +694,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
 int ocelot_vcap_filter_del(struct ocelot *ocelot,
                           struct ocelot_vcap_filter *rule);
 struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id,
-                                   bool tc_offload);
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+                                   unsigned long cookie, bool tc_offload);
 
 #endif /* _OCELOT_VCAP_H_ */
index 01570dbda503850a0a7eb5fb2da4cc3d806401e1..0e45963bb767f767f85eeb59e8bc6dc8603959a2 100644 (file)
@@ -224,6 +224,7 @@ struct hda_codec {
 #endif
 
        /* misc flags */
+       unsigned int configured:1; /* codec was configured */
        unsigned int in_freeing:1; /* being released */
        unsigned int registered:1; /* codec was registered */
        unsigned int display_power_control:1; /* needs display power */
index 989e1517332d63184602a78e1f51351832e58f4d..7a08ed2acd609f7a969a4d480cb5a2739939d677 100644 (file)
@@ -98,6 +98,7 @@ struct snd_rawmidi_file {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_substream *input;
        struct snd_rawmidi_substream *output;
+       unsigned int user_pversion;     /* supported protocol version */
 };
 
 struct snd_rawmidi_str {
index 9a448fe9355dbebaf08148b2f70a29b652a596fc..920b6a303d60cbfbb9276f3aef8ad2c571173b07 100644 (file)
@@ -178,7 +178,7 @@ TRACE_EVENT(cachefiles_unlink,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->why        = why;
                           ),
@@ -205,7 +205,7 @@ TRACE_EVENT(cachefiles_rename,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->to         = to;
                    __entry->why        = why;
@@ -305,7 +305,7 @@ TRACE_EVENT(cachefiles_mark_buried,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->why        = why;
                           ),
index 491098a0d8ed94fd691ca8e494ef7f0bba6c055e..bf7533f171ff9c6542203d31599956227c11cc66 100644 (file)
 
 TRACE_EVENT(kyber_latency,
 
-       TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+       TP_PROTO(dev_t dev, const char *domain, const char *type,
                 unsigned int percentile, unsigned int numerator,
                 unsigned int denominator, unsigned int samples),
 
-       TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+       TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                             )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
 
 TRACE_EVENT(kyber_adjust,
 
-       TP_PROTO(struct request_queue *q, const char *domain,
-                unsigned int depth),
+       TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
 
-       TP_ARGS(q, domain, depth),
+       TP_ARGS(dev, domain, depth),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
 
 TRACE_EVENT(kyber_throttled,
 
-       TP_PROTO(struct request_queue *q, const char *domain),
+       TP_PROTO(dev_t dev, const char *domain),
 
-       TP_ARGS(q, domain),
+       TP_ARGS(dev, domain),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),
 
index 6135d92e0d47bf2f7fd5b79f9d982dc598c5c184..daf82a230c0e711dfbe8efcdffdddf48bd617890 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef _UAPI_HYPERV_H
 #define _UAPI_HYPERV_H
 
-#include <linux/uuid.h>
+#include <linux/types.h>
 
 /*
  * Framework version for util services.
index 52b54d13f3850fcd6e95dcc3119a2ad6bd613564..6acd4ccafbf71bf770ac463cab2d0fe61d72ad86 100644 (file)
@@ -10,6 +10,7 @@
 #define __UAPI_MCTP_H
 
 #include <linux/types.h>
+#include <linux/socket.h>
 
 typedef __u8                   mctp_eid_t;
 
@@ -18,11 +19,13 @@ struct mctp_addr {
 };
 
 struct sockaddr_mctp {
-       unsigned short int      smctp_family;
-       int                     smctp_network;
+       __kernel_sa_family_t    smctp_family;
+       __u16                   __smctp_pad0;
+       unsigned int            smctp_network;
        struct mctp_addr        smctp_addr;
        __u8                    smctp_type;
        __u8                    smctp_tag;
+       __u8                    __smctp_pad1;
 };
 
 #define MCTP_NET_ANY           0x0
index b96c1ea7166d6d277e50dc476f0927a16651a0c8..eda0426ec4c2bf09db2ec9f5cd94ee11019e62ab 100644 (file)
@@ -213,13 +213,13 @@ enum {
        XFRM_MSG_GETSPDINFO,
 #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
 
+       XFRM_MSG_MAPPING,
+#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
+
        XFRM_MSG_SETDEFAULT,
 #define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
        XFRM_MSG_GETDEFAULT,
 #define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
-
-       XFRM_MSG_MAPPING,
-#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
        __XFRM_MSG_MAX
 };
 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -514,9 +514,12 @@ struct xfrm_user_offload {
 #define XFRM_OFFLOAD_INBOUND   2
 
 struct xfrm_userpolicy_default {
-#define XFRM_USERPOLICY_DIRMASK_MAX    (sizeof(__u8) * 8)
-       __u8                            dirmask;
-       __u8                            action;
+#define XFRM_USERPOLICY_UNSPEC 0
+#define XFRM_USERPOLICY_BLOCK  1
+#define XFRM_USERPOLICY_ACCEPT 2
+       __u8                            in;
+       __u8                            fwd;
+       __u8                            out;
 };
 
 #ifndef __KERNEL__
index 7cc2a0f3f2f56e462bc2cef69aa73eb5c459bb49..d13bb8c1b45058b81bc2ba52f9c07a64f8a2ce3d 100644 (file)
@@ -917,7 +917,6 @@ struct hl_wait_cs_in {
 #define HL_WAIT_CS_STATUS_BUSY         1
 #define HL_WAIT_CS_STATUS_TIMEDOUT     2
 #define HL_WAIT_CS_STATUS_ABORTED      3
-#define HL_WAIT_CS_STATUS_INTERRUPTED  4
 
 #define HL_WAIT_CS_STATUS_FLAG_GONE            0x1
 #define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD   0x2
@@ -1286,7 +1285,8 @@ struct hl_debug_args {
  * EIO       - The CS was aborted (usually because the device was reset)
  * ENODEV    - The device wants to do hard-reset (so user need to close FD)
  *
- * The driver also returns a custom define inside the IOCTL which can be:
+ * The driver also returns a custom define in case the IOCTL call returned 0.
+ * The define can be one of the following:
  *
  * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
  * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
@@ -1294,8 +1294,6 @@ struct hl_debug_args {
  *                                 (ETIMEDOUT)
  * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
  *                                 device was reset (EIO)
- * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
- *
  */
 
 #define HL_IOCTL_WAIT_CS                       \
index 1d84ec9db93bd34e77ab31643cd8b5534480f1a1..5859ca0a1439be4cc3276fd54d9cc20c3c28fa25 100644 (file)
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
 
 #define SNDRV_RAWMIDI_IOCTL_PVERSION   _IOR('W', 0x00, int)
 #define SNDRV_RAWMIDI_IOCTL_INFO       _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
 #define SNDRV_RAWMIDI_IOCTL_PARAMS     _IOWR('W', 0x10, struct snd_rawmidi_params)
 #define SNDRV_RAWMIDI_IOCTL_STATUS     _IOWR('W', 0x20, struct snd_rawmidi_status)
 #define SNDRV_RAWMIDI_IOCTL_DROP       _IOW('W', 0x30, int)
index db28e79b77ee7aa4293d9f17ffc02839404a7a3e..a3584a357f353c86ba162e2edbecfda1dffa4e1e 100644 (file)
@@ -52,12 +52,12 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 #if defined(CONFIG_XEN_PV)
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages);
+                 unsigned int domid, bool no_translate);
 #else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot,  unsigned int domid,
-                               bool no_translate, struct page **pages)
+                               bool no_translate)
 {
        BUG();
        return 0;
@@ -134,7 +134,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
         */
        BUG_ON(err_ptr == NULL);
        return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
-                            false, pages);
+                            false);
 }
 
 /*
@@ -146,7 +146,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
  * @err_ptr: Returns per-MFN error status.
  * @prot:    page protection mask
  * @domid:   Domain owning the pages
- * @pages:   Array of pages if this domain has an auto-translated physmap
  *
  * @mfn and @err_ptr may point to the same buffer, the MFNs will be
  * overwritten by the error codes after they are mapped.
@@ -157,14 +156,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                                             unsigned long addr, xen_pfn_t *mfn,
                                             int nr, int *err_ptr,
-                                            pgprot_t prot, unsigned int domid,
-                                            struct page **pages)
+                                            pgprot_t prot, unsigned int domid)
 {
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
        return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
-                            true, pages);
+                            true);
 }
 
 /* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -188,8 +186,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
-       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
-                            pages);
+       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
 }
 
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
index 81a79a77db46c119648ed019a3b7eb1386079532..3c4054a9554580a8b65ea5caa33493d1ee825748 100644 (file)
@@ -382,6 +382,7 @@ static char * __init xbc_make_cmdline(const char *key)
        ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
        if (ret < 0 || ret > len) {
                pr_err("Failed to print extra kernel cmdline.\n");
+               memblock_free_ptr(new_cmdline, len + 1);
                return NULL;
        }
 
index 8dd73a64f9216a294822540436d72193e66bfedf..b1cb1dbf7417f0a02849aea33ed3f1c3444a5a08 100644 (file)
@@ -657,7 +657,7 @@ static int audit_filter_rules(struct task_struct *tsk,
                        result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
                        break;
                case AUDIT_SADDR_FAM:
-                       if (ctx->sockaddr)
+                       if (ctx && ctx->sockaddr)
                                result = audit_comparator(ctx->sockaddr->ss_family,
                                                          f->op, f->val);
                        break;
index d6731c32864eb5c9002b56e5465650950dfea265..9abcc33f02cf01b552e7362c3ed11ce41fc4756d 100644 (file)
@@ -368,6 +368,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                const struct btf_type *mtype, *ptype;
                struct bpf_prog *prog;
                u32 moff;
+               u32 flags;
 
                moff = btf_member_bit_offset(t, member) / 8;
                ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -431,10 +432,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 
                tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
                tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+               flags = st_ops->func_models[i].ret_size > 0 ?
+                       BPF_TRAMP_F_RET_FENTRY_RET : 0;
                err = arch_prepare_bpf_trampoline(NULL, image,
                                                  st_map->image + PAGE_SIZE,
-                                                 &st_ops->func_models[i], 0,
-                                                 tprogs, NULL);
+                                                 &st_ops->func_models[i],
+                                                 flags, tprogs, NULL);
                if (err < 0)
                        goto reset_unlock;
 
index 9f4636d021b108f178343c9822c673e547b0a8cb..d6b7dfdd806610165a223a48b77cfc1abe6d156a 100644 (file)
@@ -827,7 +827,7 @@ int bpf_jit_charge_modmem(u32 pages)
 {
        if (atomic_long_add_return(pages, &bpf_jit_current) >
            (bpf_jit_limit >> PAGE_SHIFT)) {
-               if (!capable(CAP_SYS_ADMIN)) {
+               if (!bpf_capable()) {
                        atomic_long_sub(pages, &bpf_jit_current);
                        return -EPERM;
                }
index 09a3fd97d329ea2b3ecdad1f1c5e85dbff0478ca..6e75bbee39f0b55aa74af7f702ce60f30713cd33 100644 (file)
@@ -63,7 +63,8 @@ static inline int stack_map_data_size(struct bpf_map *map)
 
 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 {
-       u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
+       u64 elem_size = sizeof(struct stack_map_bucket) +
+                       (u64)smap->map.value_size;
        int err;
 
        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
index 8afa8690d288cc4daf023a2ef158cba7574c39da..570b0c97392a95fdfddf00cfed35c9a91e2e07fe 100644 (file)
@@ -6574,22 +6574,29 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       /* Don't associate the sock with unrelated interrupted task's cgroup. */
-       if (in_interrupt())
-               return;
+       struct cgroup *cgroup;
 
        rcu_read_lock();
+       /* Don't associate the sock with unrelated interrupted task's cgroup. */
+       if (in_interrupt()) {
+               cgroup = &cgrp_dfl_root.cgrp;
+               cgroup_get(cgroup);
+               goto out;
+       }
+
        while (true) {
                struct css_set *cset;
 
                cset = task_css_set(current);
                if (likely(cgroup_tryget(cset->dfl_cgrp))) {
-                       skcd->cgroup = cset->dfl_cgrp;
-                       cgroup_bpf_get(cset->dfl_cgrp);
+                       cgroup = cset->dfl_cgrp;
                        break;
                }
                cpu_relax();
        }
+out:
+       skcd->cgroup = cgroup;
+       cgroup_bpf_get(cgroup);
        rcu_read_unlock();
 }
 
index df1ccf4558f8242904aad112bdf86f68ed3fa76b..2a9695ccb65f539c713fdce1cc9bb2fd15c4779f 100644 (file)
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
                if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 /*
- * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * There are two global locks guarding cpuset structures - cpuset_rwsem and
  * callback_lock. We also require taking task_lock() when dereferencing a
  * task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment.
+ * comment.  The cpuset code uses only cpuset_rwsem write lock.  Other
+ * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+ * prevent change to cpuset structures.
  *
  * A task must hold both locks to modify cpusets.  If a task holds
- * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
  * is the only task able to also acquire callback_lock and be able to
  * modify cpusets.  It can perform various checks on the cpuset structure
  * first, knowing nothing will change.  It can also allocate memory while
- * just holding cpuset_mutex.  While it is performing these checks, various
+ * just holding cpuset_rwsem.  While it is performing these checks, various
  * callback routines can briefly acquire callback_lock to query cpusets.
  * Once it is ready to make the changes, it takes callback_lock, blocking
  * everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_cpus(struct task_struct *tsk,
                                  struct cpumask *pmask)
@@ -435,7 +437,7 @@ out_unlock:
  * One way or another, we guarantee to return some non-empty subset
  * of node_states[N_MEMORY].
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
  *
  * One cpuset is a subset of another if all its allowed CPUs and
  * Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set.  Call holding cpuset_mutex.
+ * are only set if the other's are set.  Call holding cpuset_rwsem.
  */
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
  * If we replaced the flag and mask values of the current cpuset
  * (cur) with those values in the trial cpuset (trial), would
  * our various subset and exclusive rules still be valid?  Presumes
- * cpuset_mutex held.
+ * cpuset_rwsem held.
  *
  * 'cur' is the address of an actual, in-use cpuset.  Operations
  * such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
        rcu_read_unlock();
 }
 
-/* Must be called with cpuset_mutex held.  */
+/* Must be called with cpuset_rwsem held.  */
 static inline int nr_cpusets(void)
 {
        /* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Must be called with cpuset_mutex held.
+ * Must be called with cpuset_rwsem held.
  *
  * The three key local variables below are:
  *    cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  * 'cpus' is removed, then call this routine to rebuild the
  * scheduler's dynamic sched domains.
  *
- * Call with cpuset_mutex held.  Takes cpus_read_lock().
+ * Call with cpuset_rwsem held.  Takes cpus_read_lock().
  */
 static void rebuild_sched_domains_locked(void)
 {
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
  *
  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
 {
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_nodemask(struct cpuset *cs)
 {
-       static nodemask_t newmems;      /* protected by cpuset_mutex */
+       static nodemask_t newmems;      /* protected by cpuset_rwsem */
        struct css_task_iter it;
        struct task_struct *task;
 
@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
         * take while holding tasklist_lock.  Forks can happen - the
         * mpol_dup() cpuset_being_rebound check will catch such forks,
         * and rebind their vma mempolicies too.  Because we still hold
-        * the global cpuset_mutex, we know that no other rebind effort
+        * the global cpuset_rwsem, we know that no other rebind effort
         * will be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
  *
  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
  * mempolicies and if the cpuset is marked 'memory_migrate',
  * migrate the tasks pages to the new memory.
  *
- * Call with cpuset_mutex held. May take callback_lock during call.
+ * Call with cpuset_rwsem held. May take callback_lock during call.
  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
  * their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
  * @cs: the cpuset in which each task's spread flags needs to be changed
  *
  * Iterate through each task of @cs updating its spread flags.  As this
- * function is called with cpuset_mutex held, cpuset membership stays
+ * function is called with cpuset_rwsem held, cpuset membership stays
  * stable.
  */
 static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
  * cs:         the cpuset to update
  * turning_on:         whether the flag is being set or cleared
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ out:
  * cs: the cpuset to update
  * new_prs: new partition root state
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 static int update_prstate(struct cpuset *cs, int new_prs)
 {
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
 
 static struct cpuset *cpuset_attach_old_cs;
 
-/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
 static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
@@ -2219,7 +2221,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 }
 
 /*
- * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
  * but we can't allocate it dynamically there.  Define it global and
  * allocate from cpuset_init().
  */
@@ -2227,7 +2229,7 @@ static cpumask_var_t cpus_attach;
 
 static void cpuset_attach(struct cgroup_taskset *tset)
 {
-       /* static buf protected by cpuset_mutex */
+       /* static buf protected by cpuset_rwsem */
        static nodemask_t cpuset_attach_nodemask_to;
        struct task_struct *task;
        struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * operation like this one can lead to a deadlock through kernfs
         * active_ref protection.  Let's break the protection.  Losing the
         * protection is okay as we check whether @cs is online after
-        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * grabbing cpuset_rwsem anyway.  This only happens on the legacy
         * hierarchies.
         */
        css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
  *  - Used for /proc/<pid>/cpuset.
  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *    doesn't really matter if tsk->cpuset changes after we read it,
- *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
+ *    and we take cpuset_rwsem, keeping cpuset_attach() from changing it
  *    anyway.
  */
 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
index f784e08c2fbd6161e50c66a49f5ab4dfedef155b..1ae0b4948a5a820d4ea3f758c733418dcb3f286f 100644 (file)
@@ -225,8 +225,6 @@ struct cred *cred_alloc_blank(void)
 #ifdef CONFIG_DEBUG_CREDENTIALS
        new->magic = CRED_MAGIC;
 #endif
-       new->ucounts = get_ucounts(&init_ucounts);
-
        if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
@@ -501,7 +499,7 @@ int commit_creds(struct cred *new)
                inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
        rcu_assign_pointer(task->real_cred, new);
        rcu_assign_pointer(task->cred, new);
-       if (new->user != old->user)
+       if (new->user != old->user || new->user_ns != old->user_ns)
                dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
        alter_cred_subscribers(old, -2);
 
@@ -669,7 +667,7 @@ int set_cred_ucounts(struct cred *new)
 {
        struct task_struct *task = current;
        const struct cred *old = task->real_cred;
-       struct ucounts *old_ucounts = new->ucounts;
+       struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
 
        if (new->user == old->user && new->user_ns == old->user_ns)
                return 0;
@@ -681,9 +679,10 @@ int set_cred_ucounts(struct cred *new)
        if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
                return 0;
 
-       if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
+       if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
                return -EAGAIN;
 
+       new->ucounts = new_ucounts;
        if (old_ucounts)
                put_ucounts(old_ucounts);
 
index 95445bd6eb7258d9d6a9fd26acc79748574fa12d..7a14ca29c37782d5b5b6e2a234cc9bdf57de9312 100644 (file)
@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
        struct hash_bucket *bucket;
        unsigned long flags;
@@ -566,7 +566,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
        if (rc == -ENOMEM) {
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
-       } else if (rc == -EEXIST) {
+       } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                err_printk(entry->dev, entry,
                        "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
@@ -1191,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                       size_t size, int direction, dma_addr_t dma_addr)
+                       size_t size, int direction, dma_addr_t dma_addr,
+                       unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1222,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                check_for_illegal_area(dev, addr, size);
        }
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -1280,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 
 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                     int nents, int mapped_ents, int direction)
+                     int nents, int mapped_ents, int direction,
+                     unsigned long attrs)
 {
        struct dma_debug_entry *entry;
        struct scatterlist *s;
@@ -1289,6 +1291,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
        if (unlikely(dma_debug_disabled()))
                return;
 
+       for_each_sg(sg, s, nents, i) {
+               check_for_stack(dev, sg_page(s), s->offset);
+               if (!PageHighMem(sg_page(s)))
+                       check_for_illegal_area(dev, sg_virt(s), s->length);
+       }
+
        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
@@ -1304,15 +1312,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;
 
-               check_for_stack(dev, sg_page(s), s->offset);
-
-               if (!PageHighMem(sg_page(s))) {
-                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-               }
-
                check_sg_segment(dev, s);
 
-               add_dma_entry(entry);
+               add_dma_entry(entry, attrs);
        }
 }
 
@@ -1368,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }
 
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                             dma_addr_t dma_addr, void *virt)
+                             dma_addr_t dma_addr, void *virt,
+                             unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1398,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
        else
                entry->pfn = page_to_pfn(virt_to_page(virt));
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
@@ -1429,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 }
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                           int direction, dma_addr_t dma_addr)
+                           int direction, dma_addr_t dma_addr,
+                           unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1449,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
        entry->direction        = direction;
        entry->map_err_type     = MAP_ERR_NOT_CHECKED;
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
index 83643b3010b2fc305e25956e51e96d24aa5a3042..f525197d3cae605aa7e0499eb2ca6f1ae134ec93 100644 (file)
 #ifdef CONFIG_DMA_API_DEBUG
 extern void debug_dma_map_page(struct device *dev, struct page *page,
                               size_t offset, size_t size,
-                              int direction, dma_addr_t dma_addr);
+                              int direction, dma_addr_t dma_addr,
+                              unsigned long attrs);
 
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                            int nents, int mapped_ents, int direction);
+                            int nents, int mapped_ents, int direction,
+                            unsigned long attrs);
 
 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, int dir);
 
 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                    dma_addr_t dma_addr, void *virt);
+                                    dma_addr_t dma_addr, void *virt,
+                                    unsigned long attrs);
 
 extern void debug_dma_free_coherent(struct device *dev, size_t size,
                                    void *virt, dma_addr_t addr);
 
 extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                   size_t size, int direction,
-                                  dma_addr_t dma_addr);
+                                  dma_addr_t dma_addr,
+                                  unsigned long attrs);
 
 extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                                     size_t size, int direction);
@@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
-                                     int direction, dma_addr_t dma_addr)
+                                     int direction, dma_addr_t dma_addr,
+                                     unsigned long attrs)
 {
 }
 
@@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 
 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                                   int nents, int mapped_ents, int direction)
+                                   int nents, int mapped_ents, int direction,
+                                   unsigned long attrs)
 {
 }
 
@@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
 }
 
 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                           dma_addr_t dma_addr, void *virt)
+                                           dma_addr_t dma_addr, void *virt,
+                                           unsigned long attrs)
 {
 }
 
@@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 
 static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                          size_t size, int direction,
-                                         dma_addr_t dma_addr)
+                                         dma_addr_t dma_addr,
+                                         unsigned long attrs)
 {
 }
 
index 06fec5547e7c790a58ab02183d31228cb94d5f09..8349a9f2c3453208a10ab5bd4b8de5f70c29d983 100644 (file)
@@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
-       debug_dma_map_page(dev, page, offset, size, dir, addr);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
 
        return addr;
 }
@@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
 
        if (ents > 0)
-               debug_dma_map_sg(dev, sg, nents, ents, dir);
+               debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
        else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                              ents != -EIO))
                return -EIO;
@@ -249,12 +249,12 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
  * Returns 0 on success or a negative error code on error. The following
  * error codes are supported with the given meaning:
  *
- *   -EINVAL - An invalid argument, unaligned access or other error
- *            in usage. Will not succeed if retried.
- *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
- *            complete the mapping. Should succeed if retried later.
- *   -EIO    - Legacy error code with an unknown meaning. eg. this is
- *            returned if a lower level call returned DMA_MAPPING_ERROR.
+ *   -EINVAL   An invalid argument, unaligned access or other error
+ *             in usage. Will not succeed if retried.
+ *   -ENOMEM   Insufficient resources (like memory or IOVA space) to
+ *             complete the mapping. Should succeed if retried later.
+ *   -EIO      Legacy error code with an unknown meaning. eg. this is
+ *             returned if a lower level call returned DMA_MAPPING_ERROR.
  */
 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                    enum dma_data_direction dir, unsigned long attrs)
@@ -305,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 
-       debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+       debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
        return addr;
 }
 EXPORT_SYMBOL(dma_map_resource);
@@ -510,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        else
                return NULL;
 
-       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
 }
 EXPORT_SYMBOL(dma_alloc_attrs);
@@ -566,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
        struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
 
        if (page)
-               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
        return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -644,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 
        if (sgt) {
                sgt->nents = 1;
-               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
        }
        return sgt;
 }
index 0c000cb01eeb159bb60b06a9ccf7e539dcdf0344..f23ca260307f02dbc501bd0b8e36ece31f82faf5 100644 (file)
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
        return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+       if (likely(!atomic_read(&event->mmap_count)))
+               return false;
+
+       perf_event_update_time(event);
+       perf_set_shadow_time(event, event->ctx);
+       perf_event_update_userpage(event);
+
+       return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+       struct perf_event *event;
+
+       if (!event_update_userpage(group_event))
+               return;
+
+       for_each_sibling_event(event, group_event)
+               event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
        struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
        }
 
        if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               *can_add_hw = 0;
                if (event->attr.pinned) {
                        perf_cgroup_event_disable(event, ctx);
                        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+               } else {
+                       ctx->rotate_necessary = 1;
+                       perf_mux_hrtimer_restart(cpuctx);
+                       group_update_userpage(event);
                }
-
-               *can_add_hw = 0;
-               ctx->rotate_necessary = 1;
-               perf_mux_hrtimer_restart(cpuctx);
        }
 
        return 0;
@@ -6324,6 +6348,8 @@ accounting:
 
                ring_buffer_attach(event, rb);
 
+               perf_event_update_time(event);
+               perf_set_shadow_time(event, event->ctx);
                perf_event_init_userpage(event);
                perf_event_update_userpage(event);
        } else {
index 40ec9a030eecf41d5597e6c910746aab385b1da0..5c26a76e800b579a1e450b1aab962da2df7fc0d6 100644 (file)
@@ -4489,8 +4489,10 @@ static void cfi_init(struct module *mod)
        /* Fix init/exit functions to point to the CFI jump table */
        if (init)
                mod->init = *init;
+#ifdef CONFIG_MODULE_UNLOAD
        if (exit)
                mod->exit = *exit;
+#endif
 
        cfi_module_add(mod, module_addr_min);
 #endif
index 1bba4128a3e68b6a5db9fe5bcf7ff2cb760b0232..f21714ea3db853be086bd2d52b6c83c61daebac3 100644 (file)
@@ -8795,6 +8795,7 @@ void idle_task_exit(void)
                finish_arch_post_lock_switch();
        }
 
+       scs_task_reset(current);
        /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
 }
 
index 49716228efb4bbed0a362e2ae0533adb5edde72c..17a653b67006af6fed33ceae0ee3608d8e8eb0ad 100644 (file)
@@ -173,16 +173,22 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
 {
        char buf[16];
+       unsigned int scaling;
 
        if (cnt > 15)
                cnt = 15;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
+       buf[cnt] = '\0';
 
-       if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+       if (kstrtouint(buf, 10, &scaling))
                return -EINVAL;
 
+       if (scaling >= SCHED_TUNABLESCALING_END)
+               return -EINVAL;
+
+       sysctl_sched_tunable_scaling = scaling;
        if (sched_update_scaling())
                return -EINVAL;
 
index ff69f245b939595ab914c70c1f7fa01c1843778a..f6a05d9b54436ae0353d88574b8669e98c468d4e 100644 (file)
@@ -4936,8 +4936,12 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        /* update hierarchical throttle state */
        walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
 
-       if (!cfs_rq->load.weight)
+       /* Nothing to run but something to decay (on_list)? Complete the branch */
+       if (!cfs_rq->load.weight) {
+               if (cfs_rq->on_list)
+                       goto unthrottle_throttle;
                return;
+       }
 
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
index 952741f6d0f9f24f2364c8d67b8b0bee94c228bd..487bf4f5dadf476171c081fd8938a88d22993423 100644 (file)
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
         */
        rcu_read_lock();
        ucounts = task_ucounts(t);
-       sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-       switch (sigpending) {
-       case 1:
-               if (likely(get_ucounts(ucounts)))
-                       break;
-               fallthrough;
-       case LONG_MAX:
-               /*
-                * we need to decrease the ucount in the userns tree on any
-                * failure to avoid counts leaking.
-                */
-               dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-               rcu_read_unlock();
-               return NULL;
-       }
+       sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        rcu_read_unlock();
+       if (!sigpending)
+               return NULL;
 
        if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
                q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
        }
 
        if (unlikely(q == NULL)) {
-               if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
-                       put_ucounts(ucounts);
+               dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
-       if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
-               put_ucounts(q->ucounts);
+       if (q->ucounts) {
+               dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
                q->ucounts = NULL;
        }
        kmem_cache_free(sigqueue_cachep, q);
index 7efbc8aaf7f647dfc32bad4f98533d5ab2cd3273..635fbdc9d589bedaa557837ec1109bcc35b857a7 100644 (file)
@@ -6977,7 +6977,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *op;
        int bit;
 
-       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
        if (bit < 0)
                return;
 
@@ -7052,7 +7052,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 {
        int bit;
 
-       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
        if (bit < 0)
                return;
 
index 7896d30d90f76f023d40c9e9f2cea2f1cbdc3878..bc677cd6422401889f77b7d60ba50fbb2ad1bd5a 100644 (file)
@@ -1744,16 +1744,15 @@ void latency_fsnotify(struct trace_array *tr)
        irq_work_queue(&tr->fsnotify_irqwork);
 }
 
-/*
- * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- *  defined(CONFIG_FSNOTIFY)
- */
-#else
+#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+       || defined(CONFIG_OSNOISE_TRACER)
 
 #define trace_create_maxlat_file(tr, d_tracer)                         \
        trace_create_file("tracing_max_latency", 0644, d_tracer,        \
                          &tr->max_latency, &tracing_max_lat_fops)
 
+#else
+#define trace_create_maxlat_file(tr, d_tracer)  do { } while (0)
 #endif
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -9473,9 +9472,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 
        create_trace_options_dir(tr);
 
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
        trace_create_maxlat_file(tr, d_tracer);
-#endif
 
        if (ftrace_create_function_files(tr, d_tracer))
                MEM_FAIL(1, "Could not allocate function filter files");
index 3044b762cbd733545ee25a83c91760e28a4825b9..5c5f208c15d341fc06be6bc8367626aa69954cdb 100644 (file)
@@ -119,10 +119,58 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev)
 {
        struct trace_eprobe *ep = to_trace_eprobe(ev);
+       const char *slash;
 
-       return strcmp(trace_probe_name(&ep->tp), event) == 0 &&
-           (!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) &&
-           trace_probe_match_command_args(&ep->tp, argc, argv);
+       /*
+        * We match the following:
+        *  event only                  - match all eprobes with event name
+        *  system and event only       - match all system/event probes
+        *
+        * The below has the above satisfied with more arguments:
+        *
+        *  attached system/event       - If the arg has the system and event
+        *                                the probe is attached to, match
+        *                                probes with the attachment.
+        *
+        *  If any more args are given, then it requires a full match.
+        */
+
+       /*
+        * If system exists, but this probe is not part of that system
+        * do not match.
+        */
+       if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
+               return false;
+
+       /* Must match the event name */
+       if (strcmp(trace_probe_name(&ep->tp), event) != 0)
+               return false;
+
+       /* No arguments match all */
+       if (argc < 1)
+               return true;
+
+       /* First argument is the system/event the probe is attached to */
+
+       slash = strchr(argv[0], '/');
+       if (!slash)
+               slash = strchr(argv[0], '.');
+       if (!slash)
+               return false;
+
+       if (strncmp(ep->event_system, argv[0], slash - argv[0]))
+               return false;
+       if (strcmp(ep->event_name, slash + 1))
+               return false;
+
+       argc--;
+       argv++;
+
+       /* If there are no other args, then match */
+       if (argc < 1)
+               return true;
+
+       return trace_probe_match_command_args(&ep->tp, argc, argv);
 }
 
 static struct dyn_event_operations eprobe_dyn_event_ops = {
@@ -632,6 +680,13 @@ static int disable_eprobe(struct trace_eprobe *ep,
 
        trace_event_trigger_enable_disable(file, 0);
        update_cond_flag(file);
+
+       /* Make sure nothing is using the edata or trigger */
+       tracepoint_synchronize_unregister();
+
+       kfree(edata);
+       kfree(trigger);
+
        return 0;
 }
 
@@ -849,8 +904,8 @@ static int __trace_eprobe_create(int argc, const char *argv[])
 
        if (IS_ERR(ep)) {
                ret = PTR_ERR(ep);
-               /* This must return -ENOMEM, else there is a bug */
-               WARN_ON_ONCE(ret != -ENOMEM);
+               /* This must return -ENOMEM or misssing event, else there is a bug */
+               WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
                ep = NULL;
                goto error;
        }
index a6061a69aa84e2c88070f090b4a2496e02615b8a..f01e442716e2f19d8dc920a3812b31d9dcaf5d80 100644 (file)
@@ -2506,7 +2506,7 @@ find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
  * events.  However, for convenience, users are allowed to directly
  * specify an event field in an action, which will be automatically
  * converted into a variable on their behalf.
-
+ *
  * If a user specifies a field on an event that isn't the event the
  * histogram currently being defined (the target event histogram), the
  * only way that can be accomplished is if a new hist trigger is
index bb51849e6375288493d1429e07a49ee8de925986..eb03f3c68375df0cfaae6d61e5c3ed04bc9805a7 100644 (file)
@@ -284,6 +284,55 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
        return (new == 0);
 }
 
+static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
+                               struct ucounts *last, enum ucount_type type)
+{
+       struct ucounts *iter, *next;
+       for (iter = ucounts; iter != last; iter = next) {
+               long dec = atomic_long_add_return(-1, &iter->ucount[type]);
+               WARN_ON_ONCE(dec < 0);
+               next = iter->ns->ucounts;
+               if (dec == 0)
+                       put_ucounts(iter);
+       }
+}
+
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+       do_dec_rlimit_put_ucounts(ucounts, NULL, type);
+}
+
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+       /* Caller must hold a reference to ucounts */
+       struct ucounts *iter;
+       long dec, ret = 0;
+
+       for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+               long max = READ_ONCE(iter->ns->ucount_max[type]);
+               long new = atomic_long_add_return(1, &iter->ucount[type]);
+               if (new < 0 || new > max)
+                       goto unwind;
+               if (iter == ucounts)
+                       ret = new;
+               /*
+                * Grab an extra ucount reference for the caller when
+                * the rlimit count was previously 0.
+                */
+               if (new != 1)
+                       continue;
+               if (!get_ucounts(iter))
+                       goto dec_unwind;
+       }
+       return ret;
+dec_unwind:
+       dec = atomic_long_add_return(-1, &iter->ucount[type]);
+       WARN_ON_ONCE(dec < 0);
+unwind:
+       do_dec_rlimit_put_ucounts(ucounts, iter, type);
+       return 0;
+}
+
 bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
 {
        struct ucounts *iter;
index 33a6b4a2443d274de3dfb1f31c11a460da118747..1b3eb1e9531f4ac71ebced9bac749ecea747064f 100644 (file)
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
 
                for_each_pwq(pwq, wq) {
                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                               /*
+                                * Defer printing to avoid deadlocks in console
+                                * drivers that queue work while holding locks
+                                * also taken in their write paths.
+                                */
+                               printk_deferred_enter();
                                show_pwq(pwq);
+                               printk_deferred_exit();
+                       }
                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                        /*
                         * We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
                raw_spin_lock_irqsave(&pool->lock, flags);
                if (pool->nr_workers == pool->nr_idle)
                        goto next_pool;
-
+               /*
+                * Defer printing to avoid deadlocks in console drivers that
+                * queue work while holding locks also taken in their write
+                * paths.
+                */
+               printk_deferred_enter();
                pr_info("pool %d:", pool->id);
                pr_cont_pool_info(pool);
                pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
                        first = false;
                }
                pr_cont("\n");
+               printk_deferred_exit();
        next_pool:
                raw_spin_unlock_irqrestore(&pool->lock, flags);
                /*
index 5efd1b435a37c5d138d15c608cb9e7b685ef20f6..a841be5244ac666cc727168c750c0d174f8d0698 100644 (file)
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
 obj-$(CONFIG_PLDMFW) += pldmfw/
 
 # KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
index cdbe54b165017e4a70c9e2aa12d021911519f2a2..e14a18af573dd71c8e0ad1641d2d53ae2a0c405f 100644 (file)
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
        /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
        if (IS_ERR_OR_NULL(to_free))
                return;
-       kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
-                                    (void *)to_free);
+       kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+                            (void *)to_free);
 }
 
 static struct kunit_suite *alloc_fake_suite(struct kunit *test,
index 5e9ef0fc261e9747717f112a53bafd8f96df7393..92192cb086c79a2f675e9b412b938ada20cb216f 100644 (file)
@@ -2700,12 +2700,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping) {
                        int nr = thp_nr_pages(head);
 
-                       if (PageSwapBacked(head))
+                       if (PageSwapBacked(head)) {
                                __mod_lruvec_page_state(head, NR_SHMEM_THPS,
                                                        -nr);
-                       else
+                       } else {
                                __mod_lruvec_page_state(head, NR_FILE_THPS,
                                                        -nr);
+                               filemap_nr_thps_dec(mapping);
+                       }
                }
 
                __split_huge_page(page, list, end);
index 184dcd2e5d9987a41315ebcf53d1dfa127e59913..5096500b2647300268bd2f56b9fc01a9c76826da 100644 (file)
@@ -932,6 +932,9 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  * covered by the memory map. The struct page representing NOMAP memory
  * frames in the memory map will be PageReserved()
  *
+ * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
+ * memblock, the caller must inform kmemleak to ignore that memory
+ *
  * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
@@ -1687,7 +1690,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
        if (!size)
                return;
 
-       if (memblock.memory.cnt <= 1) {
+       if (!memblock_memory->total_size) {
                pr_warn("%s: No memory registered yet\n", __func__);
                return;
        }
index 1592b081c58ef6dd63c6f075ad24722f2be7cb5d..d12e0608fced235dc9137d0628437046299c7cfc 100644 (file)
@@ -856,16 +856,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                goto out;
        }
 
-       if (flags & MPOL_F_NUMA_BALANCING) {
-               if (new && new->mode == MPOL_BIND) {
-                       new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
-               } else {
-                       ret = -EINVAL;
-                       mpol_put(new);
-                       goto out;
-               }
-       }
-
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                mpol_put(new);
@@ -1458,7 +1448,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
                return -EINVAL;
        if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
-
+       if (*flags & MPOL_F_NUMA_BALANCING) {
+               if (*mode != MPOL_BIND)
+                       return -EINVAL;
+               *flags |= (MPOL_F_MOF | MPOL_F_MORON);
+       }
        return 0;
 }
 
index a6a7743ee98f002c7f0ff3d410a2f99392c321cd..1852d787e6ab697d35c9e5a6992d2634f8695720 100644 (file)
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 EXPORT_SYMBOL(migrate_vma_finalize);
 #endif /* CONFIG_DEVICE_PRIVATE */
 
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
 /* Disable reclaim-based migration. */
 static void __disable_all_migrate_targets(void)
 {
@@ -3208,25 +3208,6 @@ static void set_migration_target_nodes(void)
        put_online_mems();
 }
 
-/*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs.  That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
-       set_migration_target_nodes();
-       return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
-       set_migration_target_nodes();
-       return 0;
-}
-
 /*
  * This leaves migrate-on-reclaim transiently disabled between
  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
@@ -3239,8 +3220,18 @@ static int migration_offline_cpu(unsigned int cpu)
  * set_migration_target_nodes().
  */
 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
-                                                unsigned long action, void *arg)
+                                                unsigned long action, void *_arg)
 {
+       struct memory_notify *arg = _arg;
+
+       /*
+        * Only update the node migration order when a node is
+        * changing status, like online->offline.  This avoids
+        * the overhead of synchronize_rcu() in most cases.
+        */
+       if (arg->status_change_nid < 0)
+               return notifier_from_errno(0);
+
        switch (action) {
        case MEM_GOING_OFFLINE:
                /*
@@ -3274,13 +3265,31 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
        return notifier_from_errno(0);
 }
 
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs.  That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+       set_migration_target_nodes();
+       return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+       set_migration_target_nodes();
+       return 0;
+}
+
 static int __init migrate_on_reclaim_init(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "migrate on reclaim",
-                               migration_online_cpu,
-                               migration_offline_cpu);
+       ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
+                                       NULL, migration_offline_cpu);
        /*
         * In the unlikely case that this fails, the automatic
         * migration targets may become suboptimal for nodes
@@ -3288,9 +3297,12 @@ static int __init migrate_on_reclaim_init(void)
         * rare case, do not bother trying to do anything special.
         */
        WARN_ON(ret < 0);
+       ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
+                               migration_online_cpu, NULL);
+       WARN_ON(ret < 0);
 
        hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
        return 0;
 }
 late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
index dfb91653d359eecb15cc57f5e36d702d3b4b13a9..2a52fd9ed464aa7d780dc50440b7f09989ad15ce 100644 (file)
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
        total_usage += table_size;
        return 0;
 }
-#ifdef CONFIG_MEMORY_HOTPLUG
+
 static void free_page_ext(void *addr)
 {
        if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
        return notifier_from_errno(ret);
 }
 
-#endif
-
 void __init page_ext_init(void)
 {
        unsigned long pfn;
index 1fea68b8d5a6f90d0b7f7e32ecabcf3ba556ebe7..c2dda408bb3620e651754fd45232e9b4be63944f 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/secretmem.h>
 #include <linux/set_memory.h>
 #include <linux/sched/signal.h>
-#include <linux/refcount.h>
 
 #include <uapi/linux/magic.h>
 
@@ -41,11 +40,11 @@ module_param_named(enable, secretmem_enable, bool, 0400);
 MODULE_PARM_DESC(secretmem_enable,
                 "Enable secretmem and memfd_secret(2) system call");
 
-static refcount_t secretmem_users;
+static atomic_t secretmem_users;
 
 bool secretmem_active(void)
 {
-       return !!refcount_read(&secretmem_users);
+       return !!atomic_read(&secretmem_users);
 }
 
 static vm_fault_t secretmem_fault(struct vm_fault *vmf)
@@ -104,7 +103,7 @@ static const struct vm_operations_struct secretmem_vm_ops = {
 
 static int secretmem_release(struct inode *inode, struct file *file)
 {
-       refcount_dec(&secretmem_users);
+       atomic_dec(&secretmem_users);
        return 0;
 }
 
@@ -204,6 +203,8 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
 
        if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
                return -EINVAL;
+       if (atomic_read(&secretmem_users) < 0)
+               return -ENFILE;
 
        fd = get_unused_fd_flags(flags & O_CLOEXEC);
        if (fd < 0)
@@ -218,7 +219,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
        file->f_flags |= O_LARGEFILE;
 
        fd_install(fd, file);
-       refcount_inc(&secretmem_users);
+       atomic_inc(&secretmem_users);
        return fd;
 
 err_put_fd:
index d0f725637663000ce14259df2e16b7f08cb7e504..874b3f8fe80da2525e8f197a8543f63c9a665543 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
        return 0;
 }
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
 /*
  * Drains freelist for a node on each slab cache, used for memory hot-remove.
  * Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 out:
        return notifier_from_errno(ret);
 }
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
 
 /*
  * swap the static kmem_cache_node with kmalloced memory
index 3d2025f7163b295c45c2b26a8c5260414b2fc238..d8f77346376d871666ddf57f11db08b9c74f871d 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1701,7 +1701,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-                                          void **head, void **tail)
+                                          void **head, void **tail,
+                                          int *cnt)
 {
 
        void *object;
@@ -1728,6 +1729,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                        *head = object;
                        if (!*tail)
                                *tail = object;
+               } else {
+                       /*
+                        * Adjust the reconstructed freelist depth
+                        * accordingly if object's reuse is delayed.
+                        */
+                       --(*cnt);
                }
        } while (object != old_tail);
 
@@ -3413,7 +3420,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       memcg_slab_free_hook(s, &head, 1);
+       /* memcg_slab_free_hook() is already called for bulk free. */
+       if (!tail)
+               memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the currently cpus per cpu slab.
@@ -3480,7 +3489,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects, whose reuse must be delayed.
         */
-       if (slab_free_freelist_hook(s, &head, &tail))
+       if (slab_free_freelist_hook(s, &head, &tail, &cnt))
                do_slab_free(s, page, head, tail, cnt, addr);
 }
 
@@ -4203,8 +4212,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
        if (alloc_kmem_cache_cpus(s))
                return 0;
 
-       free_kmem_cache_nodes(s);
 error:
+       __kmem_cache_release(s);
        return -EINVAL;
 }
 
@@ -4880,13 +4889,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
                return 0;
 
        err = sysfs_slab_add(s);
-       if (err)
+       if (err) {
                __kmem_cache_release(s);
+               return err;
+       }
 
        if (s->flags & SLAB_STORE_USER)
                debugfs_slab_add(s);
 
-       return err;
+       return 0;
 }
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
@@ -6108,9 +6119,14 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
        struct kmem_cache *s = file_inode(filep)->i_private;
        unsigned long *obj_map;
 
+       if (!t)
+               return -ENOMEM;
+
        obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
-       if (!obj_map)
+       if (!obj_map) {
+               seq_release_private(inode, filep);
                return -ENOMEM;
+       }
 
        if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
                alloc = TRACK_ALLOC;
@@ -6119,6 +6135,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 
        if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
                bitmap_free(obj_map);
+               seq_release_private(inode, filep);
                return -ENOMEM;
        }
 
index 2eb0e55ef54d221f61e1b721b3c9016f3f6a9780..b5f4ef35357c808a52f3dfc70dad00b8513984e9 100644 (file)
@@ -552,6 +552,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
        __skb->gso_segs = skb_shinfo(skb)->gso_segs;
 }
 
+static struct proto bpf_dummy_proto = {
+       .name   = "bpf_dummy",
+       .owner  = THIS_MODULE,
+       .obj_size = sizeof(struct sock),
+};
+
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
 {
@@ -596,20 +602,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                break;
        }
 
-       sk = kzalloc(sizeof(struct sock), GFP_USER);
+       sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
-       sock_net_set(sk, net);
        sock_init_data(NULL, sk);
 
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                kfree(ctx);
-               kfree(sk);
+               sk_free(sk);
                return -ENOMEM;
        }
        skb->sk = sk;
@@ -682,8 +687,7 @@ out:
        if (dev && dev != net->loopback_dev)
                dev_put(dev);
        kfree_skb(skb);
-       bpf_sk_storage_free(sk);
-       kfree(sk);
+       sk_free(sk);
        kfree(ctx);
        return ret;
 }
index 3523c8c7068fdd0dd4847951290d5c1390f0153d..f3d751105343ce035c3885ded7f778b07ada4ff0 100644 (file)
@@ -1677,8 +1677,6 @@ static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
                                        int ifindex,
                                        struct br_ip *saddr)
 {
-       lockdep_assert_held_once(&brmctx->br->multicast_lock);
-
        write_seqcount_begin(&querier->seq);
        querier->port_ifidx = ifindex;
        memcpy(&querier->addr, saddr, sizeof(*saddr));
@@ -3867,13 +3865,13 @@ void br_multicast_ctx_init(struct net_bridge *br,
 
        brmctx->ip4_other_query.delay_time = 0;
        brmctx->ip4_querier.port_ifidx = 0;
-       seqcount_init(&brmctx->ip4_querier.seq);
+       seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
        brmctx->multicast_igmp_version = 2;
 #if IS_ENABLED(CONFIG_IPV6)
        brmctx->multicast_mld_version = 1;
        brmctx->ip6_other_query.delay_time = 0;
        brmctx->ip6_querier.port_ifidx = 0;
-       seqcount_init(&brmctx->ip6_querier.seq);
+       seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
 #endif
 
        timer_setup(&brmctx->ip4_mc_router_timer,
index 6c58fc14d2cb2de8bcd8364fc5e766247aba2e97..5c6c4305ed235891b2ed5c5a17eb8382f2aec1a0 100644 (file)
@@ -1666,7 +1666,8 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
        }
 
        return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
-              nla_total_size(sizeof(struct br_mcast_stats)) +
+              nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
+              (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
               nla_total_size(0);
 }
 
index b4cef3a97f12b48cb429eb64765124787a471b72..37ca76406f1e8258f1f603df35f4101acf34092a 100644 (file)
@@ -82,7 +82,7 @@ struct bridge_mcast_other_query {
 struct bridge_mcast_querier {
        struct br_ip addr;
        int port_ifidx;
-       seqcount_t seq;
+       seqcount_spinlock_t seq;
 };
 
 /* IGMP/MLD statistics */
@@ -1125,9 +1125,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge_mcast *brm
 
 static inline unsigned long br_multicast_gmi(const struct net_bridge_mcast *brmctx)
 {
-       /* use the RFC default of 2 for QRV */
-       return 2 * brmctx->multicast_query_interval +
-              brmctx->multicast_query_response_interval;
+       return brmctx->multicast_membership_interval;
 }
 
 static inline bool
index 83d1798dfbb444ebe0f4ab0b0262dc7b2052ebb0..ba045f35114dd90853fb0165f1f769c60fb126e8 100644 (file)
@@ -926,7 +926,9 @@ static int translate_table(struct net *net, const char *name,
                        return -ENOMEM;
                for_each_possible_cpu(i) {
                        newinfo->chainstack[i] =
-                         vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
+                         vmalloc_node(array_size(udc_cnt,
+                                         sizeof(*(newinfo->chainstack[0]))),
+                                      cpu_to_node(i));
                        if (!newinfo->chainstack[i]) {
                                while (i)
                                        vfree(newinfo->chainstack[--i]);
index caaa532ece9492eae5918953c1ec5709799e23ae..df6968b28bf41e53a11eaf60a32e8817da54e05e 100644 (file)
@@ -121,7 +121,7 @@ enum {
 struct tpcon {
        int idx;
        int len;
-       u8 state;
+       u32 state;
        u8 bs;
        u8 sn;
        u8 ll_dl;
@@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct isotp_sock *so = isotp_sk(sk);
+       u32 old_state = so->tx.state;
        struct sk_buff *skb;
        struct net_device *dev;
        struct canfd_frame *cf;
@@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                return -EADDRNOTAVAIL;
 
        /* we do not support multiple buffers - for now */
-       if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
-               if (msg->msg_flags & MSG_DONTWAIT)
-                       return -EAGAIN;
+       if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
+           wq_has_sleeper(&so->wait)) {
+               if (msg->msg_flags & MSG_DONTWAIT) {
+                       err = -EAGAIN;
+                       goto err_out;
+               }
 
                /* wait for complete transmission of current pdu */
-               wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               if (err)
+                       goto err_out;
        }
 
-       if (!size || size > MAX_MSG_LENGTH)
-               return -EINVAL;
+       if (!size || size > MAX_MSG_LENGTH) {
+               err = -EINVAL;
+               goto err_out;
+       }
 
        /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
        off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
 
        /* does the given data fit into a single frame for SF_BROADCAST? */
        if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
-           (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
-               return -EINVAL;
+           (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
+               err = -EINVAL;
+               goto err_out;
+       }
 
        err = memcpy_from_msg(so->tx.buf, msg, size);
        if (err < 0)
-               return err;
+               goto err_out;
 
        dev = dev_get_by_index(sock_net(sk), so->ifindex);
-       if (!dev)
-               return -ENXIO;
+       if (!dev) {
+               err = -ENXIO;
+               goto err_out;
+       }
 
        skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb) {
                dev_put(dev);
-               return err;
+               goto err_out;
        }
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
        can_skb_prv(skb)->skbcnt = 0;
 
-       so->tx.state = ISOTP_SENDING;
        so->tx.len = size;
        so->tx.idx = 0;
 
@@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err) {
                pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
                               __func__, ERR_PTR(err));
-               return err;
+               goto err_out;
        }
 
        if (wait_tx_done) {
                /* wait for complete transmission of current pdu */
                wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+
+               if (sk->sk_err)
+                       return -sk->sk_err;
        }
 
        return size;
+
+err_out:
+       so->tx.state = old_state;
+       if (so->tx.state == ISOTP_IDLE)
+               wake_up_interruptible(&so->wait);
+
+       return err;
 }
 
 static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
index f6df20808f5ecafdf46bf71d217e1bf0760ebb35..16af1a7f80f60e18b5526c973f66e98821786a78 100644 (file)
@@ -330,6 +330,7 @@ int j1939_session_activate(struct j1939_session *session);
 void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
 void j1939_session_timers_cancel(struct j1939_session *session);
 
+#define J1939_MIN_TP_PACKET_SIZE 9
 #define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
 #define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
 
index 08c8606cfd9c74f915e3992e013aa2bdd1982019..9bc55ecb37f9fafa751e46d4a6914b9f05d0f805 100644 (file)
@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
        struct j1939_priv *priv, *priv_new;
        int ret;
 
-       priv = j1939_priv_get_by_ndev(ndev);
+       spin_lock(&j1939_netdev_lock);
+       priv = j1939_priv_get_by_ndev_locked(ndev);
        if (priv) {
                kref_get(&priv->rx_kref);
+               spin_unlock(&j1939_netdev_lock);
                return priv;
        }
+       spin_unlock(&j1939_netdev_lock);
 
        priv = j1939_priv_create(ndev);
        if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
                /* Someone was faster than us, use their priv and roll
                 * back our's.
                 */
+               kref_get(&priv_new->rx_kref);
                spin_unlock(&j1939_netdev_lock);
                dev_put(ndev);
                kfree(priv);
-               kref_get(&priv_new->rx_kref);
                return priv_new;
        }
        j1939_priv_set(ndev, priv);
index bb5c4b8979bec96e354c244e8fcae8421be01558..6c0a0ebdd024c206e99d1432bab805fb6aae5f3f 100644 (file)
@@ -1237,12 +1237,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
                session->err = -ETIME;
                j1939_session_deactivate(session);
        } else {
-               netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
-                            __func__, session);
-
                j1939_session_list_lock(session->priv);
                if (session->state >= J1939_SESSION_ACTIVE &&
                    session->state < J1939_SESSION_ACTIVE_MAX) {
+                       netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
+                                    __func__, session);
                        j1939_session_get(session);
                        hrtimer_start(&session->rxtimer,
                                      ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@@ -1609,6 +1608,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
                        abort = J1939_XTP_ABORT_FAULT;
                else if (len > priv->tp_max_packet_size)
                        abort = J1939_XTP_ABORT_RESOURCE;
+               else if (len < J1939_MIN_TP_PACKET_SIZE)
+                       abort = J1939_XTP_ABORT_FAULT;
        }
 
        if (abort != J1939_XTP_NO_ABORT) {
@@ -1789,6 +1790,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
 static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                                 struct sk_buff *skb)
 {
+       enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *skcb, *se_skcb;
        struct sk_buff *se_skb = NULL;
@@ -1803,9 +1805,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 
        skcb = j1939_skb_to_cb(skb);
        dat = skb->data;
-       if (skb->len <= 1)
+       if (skb->len != 8) {
                /* makes no sense */
+               abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
                goto out_session_cancel;
+       }
 
        switch (session->last_cmd) {
        case 0xff:
@@ -1904,7 +1908,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
  out_session_cancel:
        kfree_skb(se_skb);
        j1939_session_timers_cancel(session);
-       j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
+       j1939_session_cancel(session, abort);
        j1939_session_put(session);
 }
 
index 8c39283c26ae6470b47a5503ccd5688b265137d0..f0cb38344126a0ce327a9afc7aafffb07b2a0cfb 100644 (file)
@@ -50,6 +50,11 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
        if (addr_len > MAX_ADDR_LEN)
                return -EINVAL;
 
+       ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
+       if (ha && !memcmp(addr, ha->addr, addr_len) &&
+           (!addr_type || addr_type == ha->type))
+               goto found_it;
+
        while (*ins_point) {
                int diff;
 
@@ -64,6 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                } else if (diff > 0) {
                        ins_point = &parent->rb_right;
                } else {
+found_it:
                        if (exclusive)
                                return -EEXIST;
                        if (global) {
index eab5fc88a002871d7487825c6895a97cb12a4af8..d8b9dbabd4a439137c408fb548ea058b1349c3a1 100644 (file)
@@ -77,8 +77,8 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-       seq_printf(seq, "%9s: %16llu %12llu %4llu %6llu %4llu %5llu %10llu %9llu "
-                  "%16llu %12llu %4llu %6llu %4llu %5llu %7llu %10llu\n",
+       seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+                  "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
                   dev->name, stats->rx_bytes, stats->rx_packets,
                   stats->rx_errors,
                   stats->rx_dropped + stats->rx_missed_errors,
@@ -103,11 +103,11 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 static int dev_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Interface|                            Receive                   "
-                             "                    |                                 Transmit\n"
-                             "         |            bytes      packets errs   drop fifo frame "
-                             "compressed multicast|            bytes      packets errs "
-                             "  drop fifo colls carrier compressed\n");
+               seq_puts(seq, "Inter-|   Receive                            "
+                             "                    |  Transmit\n"
+                             " face |bytes    packets errs drop fifo frame "
+                             "compressed multicast|bytes    packets errs "
+                             "drop fifo colls carrier compressed\n");
        else
                dev_seq_printf_stats(seq, v);
        return 0;
@@ -259,14 +259,14 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
        struct packet_type *pt = v;
 
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Type      Device      Function\n");
+               seq_puts(seq, "Type Device      Function\n");
        else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
                if (pt->type == htons(ETH_P_ALL))
                        seq_puts(seq, "ALL ");
                else
                        seq_printf(seq, "%04x", ntohs(pt->type));
 
-               seq_printf(seq, "      %-9s   %ps\n",
+               seq_printf(seq, " %-8s %ps\n",
                           pt->dev ? pt->dev->name : "", pt->func);
        }
 
@@ -327,14 +327,12 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
        struct netdev_hw_addr *ha;
        struct net_device *dev = v;
 
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(seq, "Ifindex Interface Refcount Global_use Address\n");
+       if (v == SEQ_START_TOKEN)
                return 0;
-       }
 
        netif_addr_lock_bh(dev);
        netdev_for_each_mc_addr(ha, dev) {
-               seq_printf(seq, "%-7d %-9s %-8d %-10d %*phN\n",
+               seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
                           dev->ifindex, dev->name,
                           ha->refcount, ha->global_use,
                           (int)dev->addr_len, ha->addr);
index 972c8cb303a514758278307cd9fcb974e37f2b96..8ccce85562a1da2a5285aebd19a6a4cb7d6a163e 100644 (file)
@@ -5262,7 +5262,7 @@ nla_put_failure:
 static size_t if_nlmsg_stats_size(const struct net_device *dev,
                                  u32 filter_mask)
 {
-       size_t size = 0;
+       size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
 
        if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
                size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
index 512e629f97803f3216db8f054e97fd1b7a6e6c63..c1601f75ec4b3eac90c1b82c0dc9533e9b1b73eb 100644 (file)
@@ -1376,6 +1376,16 @@ set_sndbuf:
 }
 EXPORT_SYMBOL(sock_setsockopt);
 
+static const struct cred *sk_get_peer_cred(struct sock *sk)
+{
+       const struct cred *cred;
+
+       spin_lock(&sk->sk_peer_lock);
+       cred = get_cred(sk->sk_peer_cred);
+       spin_unlock(&sk->sk_peer_lock);
+
+       return cred;
+}
 
 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
                          struct ucred *ucred)
@@ -1552,7 +1562,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                struct ucred peercred;
                if (len > sizeof(peercred))
                        len = sizeof(peercred);
+
+               spin_lock(&sk->sk_peer_lock);
                cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+               spin_unlock(&sk->sk_peer_lock);
+
                if (copy_to_user(optval, &peercred, len))
                        return -EFAULT;
                goto lenout;
@@ -1560,20 +1574,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
        case SO_PEERGROUPS:
        {
+               const struct cred *cred;
                int ret, n;
 
-               if (!sk->sk_peer_cred)
+               cred = sk_get_peer_cred(sk);
+               if (!cred)
                        return -ENODATA;
 
-               n = sk->sk_peer_cred->group_info->ngroups;
+               n = cred->group_info->ngroups;
                if (len < n * sizeof(gid_t)) {
                        len = n * sizeof(gid_t);
+                       put_cred(cred);
                        return put_user(len, optlen) ? -EFAULT : -ERANGE;
                }
                len = n * sizeof(gid_t);
 
-               ret = groups_to_user((gid_t __user *)optval,
-                                    sk->sk_peer_cred->group_info);
+               ret = groups_to_user((gid_t __user *)optval, cred->group_info);
+               put_cred(cred);
                if (ret)
                        return ret;
                goto lenout;
@@ -1935,9 +1952,10 @@ static void __sk_destruct(struct rcu_head *head)
                sk->sk_frag.page = NULL;
        }
 
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
+       put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
+
        if (likely(sk->sk_net_refcnt))
                put_net(sock_net(sk));
        sk_prot_free(sk->sk_prot_creator, sk);
@@ -3145,6 +3163,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sk->sk_peer_pid         =       NULL;
        sk->sk_peer_cred        =       NULL;
+       spin_lock_init(&sk->sk_peer_lock);
+
        sk->sk_write_pending    =       0;
        sk->sk_rcvlowat         =       1;
        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
@@ -3210,24 +3230,8 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
-/**
- * lock_sock_fast - fast version of lock_sock
- * @sk: socket
- *
- * This version should be used for very small section, where process wont block
- * return false if fast path is taken:
- *
- *   sk_lock.slock locked, owned = 0, BH disabled
- *
- * return true if slow path is taken:
- *
- *   sk_lock.slock unlocked, owned = 1, BH enabled
- */
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
 {
-       /* The sk_lock has mutex_lock() semantics here. */
-       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
-
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
 
@@ -3256,7 +3260,7 @@ bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
        spin_unlock_bh(&sk->sk_lock.slock);
        return true;
 }
-EXPORT_SYMBOL(lock_sock_fast);
+EXPORT_SYMBOL(__lock_sock_fast);
 
 int sock_gettstamp(struct socket *sock, void __user *userstamp,
                   bool timeval, bool time32)
index 54828553975236d8d1d04582220ba701a03b9032..d8ee15f1c7a9ff14b8a172ecf9a5a614b461cdda 100644 (file)
@@ -101,8 +101,6 @@ config NET_DSA_TAG_RTL4_A
 
 config NET_DSA_TAG_OCELOT
        tristate "Tag driver for Ocelot family of switches, using NPI port"
-       depends on MSCC_OCELOT_SWITCH_LIB || \
-                  (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
        select PACKING
        help
          Say Y or M if you want to enable NPI tagging for the Ocelot switches
@@ -114,8 +112,6 @@ config NET_DSA_TAG_OCELOT
 
 config NET_DSA_TAG_OCELOT_8021Q
        tristate "Tag driver for Ocelot family of switches, using VLAN"
-       depends on MSCC_OCELOT_SWITCH_LIB || \
-                 (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
        help
          Say Y or M if you want to enable support for tagging frames with a
          custom VLAN-based header. Frames that require timestamping, such as
@@ -138,7 +134,6 @@ config NET_DSA_TAG_LAN9303
 
 config NET_DSA_TAG_SJA1105
        tristate "Tag driver for NXP SJA1105 switches"
-       depends on NET_DSA_SJA1105 || !NET_DSA_SJA1105
        select PACKING
        help
          Say Y or M if you want to enable support for tagging frames with the
index b29262eee00bf31160baf9d5644f7589af04afca..e9911b18bdbfa3f1d2b60a644754790cdd8404c9 100644 (file)
@@ -170,7 +170,7 @@ void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
        /* Check if the bridge is still in use, otherwise it is time
         * to clean it up so we can reuse this bridge_num later.
         */
-       if (!dsa_bridge_num_find(bridge_dev))
+       if (dsa_bridge_num_find(bridge_dev) < 0)
                clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
 }
 
@@ -811,7 +811,9 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
                if (!dsa_is_cpu_port(ds, port))
                        continue;
 
+               rtnl_lock();
                err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+               rtnl_unlock();
                if (err) {
                        dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
                                tag_ops->name, ERR_PTR(err));
@@ -1372,12 +1374,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
 
        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
-               if (err)
+               if (err) {
+                       of_node_put(port);
                        goto out_put_node;
+               }
 
                if (reg >= ds->num_ports) {
                        dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
                                port, reg, ds->num_ports);
+                       of_node_put(port);
                        err = -EINVAL;
                        goto out_put_node;
                }
@@ -1385,8 +1390,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
                dp = dsa_to_port(ds, reg);
 
                err = dsa_port_parse_of(dp, port);
-               if (err)
+               if (err) {
+                       of_node_put(port);
                        goto out_put_node;
+               }
        }
 
 out_put_node:
index 1c797ec8e2c2b805a890df560c80b4f0eb45d4da..6466d0539af9ffea73dda13dfc1e656fe607cf4d 100644 (file)
@@ -168,7 +168,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
                if (extack._msg)
                        dev_err(ds->dev, "port %d: %s\n", info->port,
                                extack._msg);
-               if (err && err != EOPNOTSUPP)
+               if (err && err != -EOPNOTSUPP)
                        return err;
        }
 
index 77d0ce89ab77abb174aae90c5094f7a1a3ec5ae2..b3da4b2ea11cf693e391f5beb28f2586593d188d 100644 (file)
@@ -45,6 +45,7 @@
  *   6    6       2        2      4    2       N
  */
 
+#include <linux/dsa/mv88e6xxx.h>
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -129,12 +130,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
        u8 tag_dev, tag_port;
        enum dsa_cmd cmd;
        u8 *dsa_header;
-       u16 pvid = 0;
-       int err;
 
        if (skb->offload_fwd_mark) {
                struct dsa_switch_tree *dst = dp->ds->dst;
-               struct net_device *br = dp->bridge_dev;
 
                cmd = DSA_CMD_FORWARD;
 
@@ -144,19 +142,6 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                 */
                tag_dev = dst->last_switch + 1 + dp->bridge_num;
                tag_port = 0;
-
-               /* If we are offloading forwarding for a VLAN-unaware bridge,
-                * inject packets to hardware using the bridge's pvid, since
-                * that's where the packets ingressed from.
-                */
-               if (!br_vlan_enabled(br)) {
-                       /* Safe because __dev_queue_xmit() runs under
-                        * rcu_read_lock_bh()
-                        */
-                       err = br_vlan_get_pvid_rcu(br, &pvid);
-                       if (err)
-                               return NULL;
-               }
        } else {
                cmd = DSA_CMD_FROM_CPU;
                tag_dev = dp->ds->index;
@@ -180,16 +165,21 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                        dsa_header[2] &= ~0x10;
                }
        } else {
+               struct net_device *br = dp->bridge_dev;
+               u16 vid;
+
+               vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+
                skb_push(skb, DSA_HLEN + extra);
                dsa_alloc_etype_header(skb, DSA_HLEN + extra);
 
-               /* Construct untagged DSA tag. */
+               /* Construct DSA header from untagged frame. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;
 
                dsa_header[0] = (cmd << 6) | tag_dev;
                dsa_header[1] = tag_port << 3;
-               dsa_header[2] = pvid >> 8;
-               dsa_header[3] = pvid & 0xff;
+               dsa_header[2] = vid >> 8;
+               dsa_header[3] = vid & 0xff;
        }
 
        return skb;
@@ -210,7 +200,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
        cmd = dsa_header[0] >> 6;
        switch (cmd) {
        case DSA_CMD_FORWARD:
-               trunk = !!(dsa_header[1] & 7);
+               trunk = !!(dsa_header[1] & 4);
                break;
 
        case DSA_CMD_TO_CPU:
index 8025ed778d33aa20f96702c1e71dbf4859d3034e..605b51ca692105628a1b5108d7d596bf2b8c223b 100644 (file)
@@ -2,7 +2,6 @@
 /* Copyright 2019 NXP
  */
 #include <linux/dsa/ocelot.h>
-#include <soc/mscc/ocelot.h>
 #include "dsa_priv.h"
 
 static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
index 59072930cb021e19ad2a66cac490d7edc2a898e0..3412051981d7bd99c6e58e2aeb0a82d62439bbe9 100644 (file)
@@ -9,10 +9,32 @@
  *   that on egress
  */
 #include <linux/dsa/8021q.h>
-#include <soc/mscc/ocelot.h>
-#include <soc/mscc/ocelot_ptp.h>
+#include <linux/dsa/ocelot.h>
 #include "dsa_priv.h"
 
+static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
+                                        struct sk_buff *skb)
+{
+       struct felix_deferred_xmit_work *xmit_work;
+       struct felix_port *felix_port = dp->priv;
+
+       xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+       if (!xmit_work)
+               return NULL;
+
+       /* Calls felix_port_deferred_xmit in felix.c */
+       kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+       /* Increase refcount so the kfree_skb in dsa_slave_xmit
+        * won't really free the packet.
+        */
+       xmit_work->dp = dp;
+       xmit_work->skb = skb_get(skb);
+
+       kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+
+       return NULL;
+}
+
 static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
                                   struct net_device *netdev)
 {
@@ -20,18 +42,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
        u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
        u16 queue_mapping = skb_get_queue_mapping(skb);
        u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
-       struct ocelot *ocelot = dp->ds->priv;
-       int port = dp->index;
-       u32 rew_op = 0;
+       struct ethhdr *hdr = eth_hdr(skb);
 
-       rew_op = ocelot_ptp_rew_op(skb);
-       if (rew_op) {
-               if (!ocelot_can_inject(ocelot, 0))
-                       return NULL;
-
-               ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
-               return NULL;
-       }
+       if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
+               return ocelot_defer_xmit(dp, skb);
 
        return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
                              ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
index c054f48541c8d621ca018cb94016193789e66c37..2edede9ddac93b388508cd1ccee6f4c6103cee27 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dsa/sja1105.h>
 #include <linux/dsa/8021q.h>
+#include <linux/skbuff.h>
 #include <linux/packing.h>
 #include "dsa_priv.h"
 
 #define SJA1110_TX_TRAILER_LEN                 4
 #define SJA1110_MAX_PADDING_LEN                        15
 
+enum sja1110_meta_tstamp {
+       SJA1110_META_TSTAMP_TX = 0,
+       SJA1110_META_TSTAMP_RX = 1,
+};
+
 /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
 static inline bool sja1105_is_link_local(const struct sk_buff *skb)
 {
@@ -520,6 +526,43 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
                                              is_meta);
 }
 
+static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
+                                       u8 ts_id, enum sja1110_meta_tstamp dir,
+                                       u64 tstamp)
+{
+       struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct skb_shared_hwtstamps shwt = {0};
+       struct sja1105_port *sp = dp->priv;
+
+       if (!dsa_port_is_sja1105(dp))
+               return;
+
+       /* We don't care about RX timestamps on the CPU port */
+       if (dir == SJA1110_META_TSTAMP_RX)
+               return;
+
+       spin_lock(&sp->data->skb_txtstamp_queue.lock);
+
+       skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
+               if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+                       continue;
+
+               __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
+               skb_match = skb;
+
+               break;
+       }
+
+       spin_unlock(&sp->data->skb_txtstamp_queue.lock);
+
+       if (WARN_ON(!skb_match))
+               return;
+
+       shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+       skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
 static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
 {
        u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
index b42c429cebbe805beabd41233b24203fb82fc0cf..3364cb9c67e018fea2b2e370046de5252581b996 100644 (file)
@@ -1661,7 +1661,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
 
 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
-                   int nh_weight, u8 rt_family)
+                   int nh_weight, u8 rt_family, u32 nh_tclassid)
 {
        const struct net_device *dev = nhc->nhc_dev;
        struct rtnexthop *rtnh;
@@ -1679,6 +1679,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
 
        rtnh->rtnh_flags = flags;
 
+       if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
+               goto nla_put_failure;
+
        /* length of rtnetlink header + attributes */
        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
 
@@ -1706,14 +1709,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
        }
 
        for_nexthops(fi) {
-               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
-                                   AF_INET) < 0)
-                       goto nla_put_failure;
+               u32 nh_tclassid = 0;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-               if (nh->nh_tclassid &&
-                   nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
-                       goto nla_put_failure;
+               nh_tclassid = nh->nh_tclassid;
 #endif
+               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+                                   AF_INET, nh_tclassid) < 0)
+                       goto nla_put_failure;
        } endfor_nexthops(fi);
 
 mp_end:
index 8b30cadff708f96f4acf4a459c8e9c0397f8d305..b7e277d8a84d224cb9c034321e688d765d01c07f 100644 (file)
@@ -1054,14 +1054,19 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
        iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
        if (!ext_hdr || !iio)
                goto send_mal_query;
-       if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr))
+       if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
+           ntohs(iio->extobj_hdr.length) > sizeof(_iio))
                goto send_mal_query;
        ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
+       iio = skb_header_pointer(skb, sizeof(_ext_hdr),
+                                sizeof(iio->extobj_hdr) + ident_len, &_iio);
+       if (!iio)
+               goto send_mal_query;
+
        status = 0;
        dev = NULL;
        switch (iio->extobj_hdr.class_type) {
        case ICMP_EXT_ECHO_CTYPE_NAME:
-               iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
                if (ident_len >= IFNAMSIZ)
                        goto send_mal_query;
                memset(buff, 0, sizeof(buff));
@@ -1069,30 +1074,24 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
                dev = dev_get_by_name(net, buff);
                break;
        case ICMP_EXT_ECHO_CTYPE_INDEX:
-               iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
-                                        sizeof(iio->ident.ifindex), &_iio);
                if (ident_len != sizeof(iio->ident.ifindex))
                        goto send_mal_query;
                dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
                break;
        case ICMP_EXT_ECHO_CTYPE_ADDR:
-               if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
+               if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
+                   ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
                                 iio->ident.addr.ctype3_hdr.addrlen)
                        goto send_mal_query;
                switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
                case ICMP_AFI_IP:
-                       iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
-                                                sizeof(struct in_addr), &_iio);
-                       if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
-                                        sizeof(struct in_addr))
+                       if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
                                goto send_mal_query;
                        dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
                        break;
 #if IS_ENABLED(CONFIG_IPV6)
                case ICMP_AFI_IP6:
-                       iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
-                       if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
-                                        sizeof(struct in6_addr))
+                       if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
                                goto send_mal_query;
                        dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
                        dev_hold(dev);
index 80aeaf9e6e16e004011301c471e4dbf17a89e30d..bfb522e513461a92cbd19c0c2c14b2dda33bb4f7 100644 (file)
@@ -242,8 +242,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
                if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                        return -1;
+               score =  sk->sk_bound_dev_if ? 2 : 1;
 
-               score = sk->sk_family == PF_INET ? 2 : 1;
+               if (sk->sk_family == PF_INET)
+                       score++;
                if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
index b88e0f36cd05ae067be12599e00a896c7e93dc9c..8265c676570533cf3f752562dbdd54bfb0508688 100644 (file)
@@ -42,7 +42,7 @@ iptable_raw_hook(void *priv, struct sk_buff *skb,
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
 
-static int __net_init iptable_raw_table_init(struct net *net)
+static int iptable_raw_table_init(struct net *net)
 {
        struct ipt_replace *repl;
        const struct xt_table *table = &packet_raw;
index 613432a36f0a701afc993d23053bfc03c4a8ed0a..e61ea428ea1872077ec6c2917b8c511518a692d8 100644 (file)
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-static unsigned int defrag4_pernet_id __read_mostly;
 static DEFINE_MUTEX(defrag4_mutex);
 
-struct defrag4_pernet {
-       unsigned int users;
-};
-
 static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
                                   u_int32_t user)
 {
@@ -111,19 +106,15 @@ static const struct nf_hook_ops ipv4_defrag_ops[] = {
 
 static void __net_exit defrag4_net_exit(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
-       if (nf_defrag->users) {
+       if (net->nf.defrag_ipv4_users) {
                nf_unregister_net_hooks(net, ipv4_defrag_ops,
                                        ARRAY_SIZE(ipv4_defrag_ops));
-               nf_defrag->users = 0;
+               net->nf.defrag_ipv4_users = 0;
        }
 }
 
 static struct pernet_operations defrag4_net_ops = {
        .exit = defrag4_net_exit,
-       .id   = &defrag4_pernet_id,
-       .size = sizeof(struct defrag4_pernet),
 };
 
 static int __init nf_defrag_init(void)
@@ -138,24 +129,23 @@ static void __exit nf_defrag_fini(void)
 
 int nf_defrag_ipv4_enable(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
        int err = 0;
 
        mutex_lock(&defrag4_mutex);
-       if (nf_defrag->users == UINT_MAX) {
+       if (net->nf.defrag_ipv4_users == UINT_MAX) {
                err = -EOVERFLOW;
                goto out_unlock;
        }
 
-       if (nf_defrag->users) {
-               nf_defrag->users++;
+       if (net->nf.defrag_ipv4_users) {
+               net->nf.defrag_ipv4_users++;
                goto out_unlock;
        }
 
        err = nf_register_net_hooks(net, ipv4_defrag_ops,
                                    ARRAY_SIZE(ipv4_defrag_ops));
        if (err == 0)
-               nf_defrag->users = 1;
+               net->nf.defrag_ipv4_users = 1;
 
  out_unlock:
        mutex_unlock(&defrag4_mutex);
@@ -165,12 +155,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
 
 void nf_defrag_ipv4_disable(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
        mutex_lock(&defrag4_mutex);
-       if (nf_defrag->users) {
-               nf_defrag->users--;
-               if (nf_defrag->users == 0)
+       if (net->nf.defrag_ipv4_users) {
+               net->nf.defrag_ipv4_users--;
+               if (net->nf.defrag_ipv4_users == 0)
                        nf_unregister_net_hooks(net, ipv4_defrag_ops,
                                                ARRAY_SIZE(ipv4_defrag_ops));
        }
index 2e62e0d6373a6ee5d98756fb1967bff9743ffa02..5b8ce65dfc06789c4363fd49ea3525690bf54198 100644 (file)
@@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
 EXPORT_SYMBOL(tcp_md5_needed);
 
+static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
+{
+       if (!old)
+               return true;
+
+       /* l3index always overrides non-l3index */
+       if (old->l3index && new->l3index == 0)
+               return false;
+       if (old->l3index == 0 && new->l3index)
+               return true;
+
+       return old->prefixlen < new->prefixlen;
+}
+
 /* Find the Key structure for an address.  */
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                           const union tcp_md5_addr *addr,
@@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
                        continue;
                if (family == AF_INET) {
                        mask = inet_make_mask(key->prefixlen);
@@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                        match = false;
                }
 
-               if (match && (!best_match ||
-                             key->prefixlen > best_match->prefixlen))
+               if (match && better_md5_match(best_match, key))
                        best_match = key;
        }
        return best_match;
@@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                                      const union tcp_md5_addr *addr,
                                                      int family, u8 prefixlen,
-                                                     int l3index)
+                                                     int l3index, u8 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
@@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
+                       continue;
+               if (key->l3index != l3index)
                        continue;
                if (!memcmp(&key->addr, addr, size) &&
                    key->prefixlen == prefixlen)
@@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
 
 /* This can be called on a newly created socket, from other files */
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp)
 {
        /* Add Key to the list */
@@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (key) {
                /* Pre-existing entry - just update that one.
                 * Note that the key might be used concurrently.
@@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        key->family = family;
        key->prefixlen = prefixlen;
        key->l3index = l3index;
+       key->flags = flags;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
@@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 EXPORT_SYMBOL(tcp_md5_do_add);
 
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
-                  u8 prefixlen, int l3index)
+                  u8 prefixlen, int l3index, u8 flags)
 {
        struct tcp_md5sig_key *key;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
@@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        const union tcp_md5_addr *addr;
        u8 prefixlen = 32;
        int l3index = 0;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        if (sin->sin_family != AF_INET)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
                        return -EINVAL;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
 
        if (!cmd.tcpm_keylen)
-               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
+               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;
 
-       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
+       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
-               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
+               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
                               key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
index 8851c9463b4b62c9017565f545250c4ffe22927c..8536b2a7210b2ee88443d7bdfe21d22fd1433997 100644 (file)
@@ -390,7 +390,8 @@ static int compute_score(struct sock *sk, struct net *net,
                                        dif, sdif);
        if (!dev_match)
                return -1;
-       score += 4;
+       if (sk->sk_bound_dev_if)
+               score += 4;
 
        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
@@ -1053,7 +1054,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        __be16 dport;
        u8  tos;
        int err, is_udplite = IS_UDPLITE(sk);
-       int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+       int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sk_buff *skb;
        struct ip_options_data opt_copy;
@@ -1361,7 +1362,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
        }
 
        up->len += size;
-       if (!(up->corkflag || (flags&MSG_MORE)))
+       if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
                ret = udp_push_pending_frames(sk);
        if (!ret)
                ret = size;
@@ -2662,9 +2663,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
-                       up->corkflag = 1;
+                       WRITE_ONCE(up->corkflag, 1);
                } else {
-                       up->corkflag = 0;
+                       WRITE_ONCE(up->corkflag, 0);
                        lock_sock(sk);
                        push_pending_frames(sk);
                        release_sock(sk);
@@ -2787,7 +2788,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
 
        switch (optname) {
        case UDP_CORK:
-               val = up->corkflag;
+               val = READ_ONCE(up->corkflag);
                break;
 
        case UDP_ENCAP:
index 55c290d556059a989cebc6d26cb77e8b01edb5a5..67c9114835c84864a3353c6f1c16853ea62a21c5 100644 (file)
@@ -106,7 +106,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                        return -1;
 
-               score = 1;
+               score =  sk->sk_bound_dev_if ? 2 : 1;
                if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
index 5e8961004832e3e9be341c5c5d203d29ae05be3c..d128172bb54976bac370449c2204d42cd5b84e69 100644 (file)
@@ -770,6 +770,66 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                data += sizeof(__be32);
        }
 
+       /* bit12 undefined: filled with empty value */
+       if (trace->type.bit12) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit13 undefined: filled with empty value */
+       if (trace->type.bit13) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit14 undefined: filled with empty value */
+       if (trace->type.bit14) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit15 undefined: filled with empty value */
+       if (trace->type.bit15) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit16 undefined: filled with empty value */
+       if (trace->type.bit16) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit17 undefined: filled with empty value */
+       if (trace->type.bit17) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit18 undefined: filled with empty value */
+       if (trace->type.bit18) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit19 undefined: filled with empty value */
+       if (trace->type.bit19) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit20 undefined: filled with empty value */
+       if (trace->type.bit20) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit21 undefined: filled with empty value */
+       if (trace->type.bit21) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
        /* opaque state snapshot */
        if (trace->type.bit22) {
                if (!sc) {
@@ -791,16 +851,10 @@ void ioam6_fill_trace_data(struct sk_buff *skb,
        struct ioam6_schema *sc;
        u8 sclen = 0;
 
-       /* Skip if Overflow flag is set OR
-        * if an unknown type (bit 12-21) is set
+       /* Skip if Overflow flag is set
         */
-       if (trace->overflow ||
-           trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
-           trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
-           trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
-           trace->type.bit21) {
+       if (trace->overflow)
                return;
-       }
 
        /* NodeLen does not include Opaque State Snapshot length. We need to
         * take it into account if the corresponding bit is set (bit 22) and
index f9ee04541c17fcfc52a2c5843d62bbdc72a2a815..9b7b726f8f45f2f0fb65b0792d2ae29e8042b7ab 100644 (file)
@@ -75,7 +75,11 @@ static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
        u32 fields;
 
        if (!trace->type_be32 || !trace->remlen ||
-           trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4)
+           trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
+           trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
+           trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
+           trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
+           trace->type.bit21)
                return false;
 
        trace->nodelen = 0;
index 12f985f43bccfb05d693111f32e58e27aca7da2a..2f044a49afa8cf3586c36607c34073edecafc69c 100644 (file)
@@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 
 int ip6_forward(struct sk_buff *skb)
 {
-       struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
+       struct inet6_dev *idev;
        u32 mtu;
 
+       idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;
 
index de2cf3943b91e4eed16937dc865b009eb451f76e..a579ea14a69b67cdd641e1bf1dc943b8b8468007 100644 (file)
@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
         * things we don't know, ie. tcp syn flag or ports).  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
+       acpar.fragoff = 0;
        acpar.hotdrop = false;
        acpar.state   = state;
 
index 733c83d38b30889813b92ce1bcbfcc002868e918..4ad8b2032f1f92210fc041cd96a0c8eb31d90c9e 100644 (file)
@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline bool
 segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
 {
-       bool r;
-       pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
-                invert ? '!' : ' ', min, id, max);
-       r = (id >= min && id <= max) ^ invert;
-       pr_debug(" result %s\n", r ? "PASS" : "FAILED");
-       return r;
+       return (id >= min && id <= max) ^ invert;
 }
 
 static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                return false;
        }
 
-       pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
-       pr_debug("TYPE %04X ", rh->type);
-       pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
-       pr_debug("IPv6 RT segsleft %02X ",
-                segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                               rh->segments_left,
-                               !!(rtinfo->invflags & IP6T_RT_INV_SGS)));
-       pr_debug("type %02X %02X %02X ",
-                rtinfo->rt_type, rh->type,
-                (!(rtinfo->flags & IP6T_RT_TYP) ||
-                 ((rtinfo->rt_type == rh->type) ^
-                  !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
-       pr_debug("len %02X %04X %02X ",
-                rtinfo->hdrlen, hdrlen,
-                !(rtinfo->flags & IP6T_RT_LEN) ||
-                 ((rtinfo->hdrlen == hdrlen) ^
-                  !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
-       pr_debug("res %02X %02X %02X ",
-                rtinfo->flags & IP6T_RT_RES,
-                ((const struct rt0_hdr *)rh)->reserved,
-                !((rtinfo->flags & IP6T_RT_RES) &&
-                  (((const struct rt0_hdr *)rh)->reserved)));
-
        ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
                              rh->segments_left,
                              !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                                       reserved),
                                        sizeof(_reserved),
                                        &_reserved);
+               if (!rp) {
+                       par->hotdrop = true;
+                       return false;
+               }
 
                ret = (*rp == 0);
        }
 
-       pr_debug("#%d ", rtinfo->addrnr);
        if (!(rtinfo->flags & IP6T_RT_FST)) {
                return ret;
        } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
-               pr_debug("Not strict ");
                if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
-                       pr_debug("There isn't enough space\n");
                        return false;
                } else {
                        unsigned int i = 0;
 
-                       pr_debug("#%d ", rtinfo->addrnr);
                        for (temp = 0;
                             temp < (unsigned int)((hdrlen - 8) / 16);
                             temp++) {
@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                        return false;
                                }
 
-                               if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
-                                       pr_debug("i=%d temp=%d;\n", i, temp);
+                               if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
                                        i++;
-                               }
                                if (i == rtinfo->addrnr)
                                        break;
                        }
-                       pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
                        if (i == rtinfo->addrnr)
                                return ret;
                        else
                                return false;
                }
        } else {
-               pr_debug("Strict ");
                if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
-                       pr_debug("There isn't enough space\n");
                        return false;
                } else {
-                       pr_debug("#%d ", rtinfo->addrnr);
                        for (temp = 0; temp < rtinfo->addrnr; temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
                                        break;
                        }
-                       pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
                        if (temp == rtinfo->addrnr &&
                            temp == (unsigned int)((hdrlen - 8) / 16))
                                return ret;
index a0108415275fe02a643efb307743b503985ae3f2..5c47be29b9ee9832069aa4b9e97852e22dd4278b 100644 (file)
@@ -33,7 +33,7 @@
 
 static const char nf_frags_cache_name[] = "nf-frags";
 
-unsigned int nf_frag_pernet_id __read_mostly;
+static unsigned int nf_frag_pernet_id __read_mostly;
 static struct inet_frags nf_frags;
 
 static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
index e8a59d8bf2adf705824586f6f60b35b53d2be9e6..cb4eb1d2c620b96f23e516d17835a2aada6f0fa8 100644 (file)
@@ -25,8 +25,6 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
-extern unsigned int nf_frag_pernet_id;
-
 static DEFINE_MUTEX(defrag6_mutex);
 
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -91,12 +89,10 @@ static const struct nf_hook_ops ipv6_defrag_ops[] = {
 
 static void __net_exit defrag6_net_exit(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
-       if (nf_frag->users) {
+       if (net->nf.defrag_ipv6_users) {
                nf_unregister_net_hooks(net, ipv6_defrag_ops,
                                        ARRAY_SIZE(ipv6_defrag_ops));
-               nf_frag->users = 0;
+               net->nf.defrag_ipv6_users = 0;
        }
 }
 
@@ -134,24 +130,23 @@ static void __exit nf_defrag_fini(void)
 
 int nf_defrag_ipv6_enable(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
        int err = 0;
 
        mutex_lock(&defrag6_mutex);
-       if (nf_frag->users == UINT_MAX) {
+       if (net->nf.defrag_ipv6_users == UINT_MAX) {
                err = -EOVERFLOW;
                goto out_unlock;
        }
 
-       if (nf_frag->users) {
-               nf_frag->users++;
+       if (net->nf.defrag_ipv6_users) {
+               net->nf.defrag_ipv6_users++;
                goto out_unlock;
        }
 
        err = nf_register_net_hooks(net, ipv6_defrag_ops,
                                    ARRAY_SIZE(ipv6_defrag_ops));
        if (err == 0)
-               nf_frag->users = 1;
+               net->nf.defrag_ipv6_users = 1;
 
  out_unlock:
        mutex_unlock(&defrag6_mutex);
@@ -161,12 +156,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv6_enable);
 
 void nf_defrag_ipv6_disable(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
        mutex_lock(&defrag6_mutex);
-       if (nf_frag->users) {
-               nf_frag->users--;
-               if (nf_frag->users == 0)
+       if (net->nf.defrag_ipv6_users) {
+               net->nf.defrag_ipv6_users--;
+               if (net->nf.defrag_ipv6_users == 0)
                        nf_unregister_net_hooks(net, ipv6_defrag_ops,
                                                ARRAY_SIZE(ipv6_defrag_ops));
        }
index dbc2240239777b5fb82ebf40841541ae60813d14..9b9ef09382ab911b25e03b2ed7000c24411bf5cb 100644 (file)
@@ -5681,14 +5681,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
 
                if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
-                                   rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
+                                   rt->fib6_nh->fib_nh_weight, AF_INET6,
+                                   0) < 0)
                        goto nla_put_failure;
 
                list_for_each_entry_safe(sibling, next_sibling,
                                         &rt->fib6_siblings, fib6_siblings) {
                        if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
                                            sibling->fib6_nh->fib_nh_weight,
-                                           AF_INET6) < 0)
+                                           AF_INET6, 0) < 0)
                                goto nla_put_failure;
                }
 
index 0ce52d46e4f81b221a6acd4a0dd7b0d462dbac7a..b03dd02c9f13c50aa6731174d23d4218859cb49d 100644 (file)
@@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        int l3index = 0;
        u8 prefixlen;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET, prefixlen,
-                                             l3index);
+                                             l3index, flags);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                                     AF_INET6, prefixlen, l3index);
+                                     AF_INET6, prefixlen, l3index, flags);
        }
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
@@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 
        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
-                                     AF_INET, prefixlen, l3index,
+                                     AF_INET, prefixlen, l3index, flags,
                                      cmd.tcpm_key, cmd.tcpm_keylen,
                                      GFP_KERNEL);
 
        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                             AF_INET6, prefixlen, l3index,
+                             AF_INET6, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
-                              AF_INET6, 128, l3index, key->key, key->keylen,
+                              AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
 #endif
index ea53847b5b7e8b82f1898fa442327a9ce060085f..8d785232b4796b7cafe14a35dedcbb0aaa2c37c2 100644 (file)
@@ -133,7 +133,8 @@ static int compute_score(struct sock *sk, struct net *net,
        dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
        if (!dev_match)
                return -1;
-       score++;
+       if (sk->sk_bound_dev_if)
+               score++;
 
        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
@@ -1303,7 +1304,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int addr_len = msg->msg_namelen;
        bool connected = false;
        int ulen = len;
-       int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+       int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
        int err;
        int is_udplite = IS_UDPLITE(sk);
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
index efbefcbac3ac6407d8a56e53270c2d566ba0c5f9..7cab1cf09bf1ad7ca59931f85cdf022ce2052524 100644 (file)
@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
        atomic_set(&newtbl->entries,  0);
        spin_lock_init(&newtbl->gates_lock);
        spin_lock_init(&newtbl->walk_lock);
-       rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+       if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
+               kfree(newtbl);
+               return NULL;
+       }
 
        return newtbl;
 }
index 204830a55240b4829991f4d815a6f1a10538c0f5..3fbd0b9ff9135474cdc8d900c52daa1958b1cadd 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
  * Copyright 2012-2013, cozybit Inc.
+ * Copyright (C) 2021 Intel Corporation
  */
 
 #include "mesh.h"
@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
 
        /* only transmit to PS STA with announced, non-zero awake window */
        if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
-           (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+           (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
                return;
 
        if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
index e5935e3d7a078ff7e3e87f4a845e959712459b71..8c6416129d5bed9669a77419b1e6f230cdc24063 100644 (file)
@@ -392,10 +392,6 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
        int mcast_rate;
        bool use_basicrate = false;
 
-       if (ieee80211_is_tx_data(txrc->skb) &&
-           info->flags & IEEE80211_TX_CTL_NO_ACK)
-               return false;
-
        if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
                __rate_control_send_low(txrc->hw, sband, pubsta, info,
                                        txrc->rate_idx_mask);
index 99ed68f7dc365ebc526338d883789a0fd1aa1616..c4071b015c18848ba5cbbc019d12890b3ad2640f 100644 (file)
@@ -4131,7 +4131,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                if (!bssid)
                        return false;
                if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
-                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
+                   !is_valid_ether_addr(hdr->addr2))
                        return false;
                if (ieee80211_is_beacon(hdr->frame_control))
                        return true;
index 2d1193ed3eb524219bd1df328db6531e2ac0d419..8921088a5df65f1e837e046413e669704955526d 100644 (file)
@@ -2209,7 +2209,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                        }
 
                        vht_mcs = iterator.this_arg[4] >> 4;
+                       if (vht_mcs > 11)
+                               vht_mcs = 0;
                        vht_nss = iterator.this_arg[4] & 0xF;
+                       if (!vht_nss || vht_nss > 8)
+                               vht_nss = 1;
                        break;
 
                /*
@@ -3380,6 +3384,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /* If n == 2, the "while (*frag_tail)" loop above didn't execute
+        * and  frag_tail should be &skb_shinfo(head)->frag_list.
+        * However, ieee80211_amsdu_prepare_head() can reallocate it.
+        * Reload frag_tail to have it pointing to the correct place.
+        */
+       if (n == 2)
+               frag_tail = &skb_shinfo(head)->frag_list;
+
        /*
         * Pad out the previous subframe to a multiple of 4 by adding the
         * padding to the next one, that's being added. Note that head->len
index bca47fad5a16280b808bef4c8832d6f0e0626b37..4eed23e27610439b316e8d846bd22af83399b8bf 100644 (file)
@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                        return RX_DROP_UNUSABLE;
        }
 
+       /* reload hdr - skb might have been reallocated */
+       hdr = (void *)rx->skb->data;
+
        data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
                        return RX_DROP_UNUSABLE;
        }
 
+       /* reload hdr - skb might have been reallocated */
+       hdr = (void *)rx->skb->data;
+
        data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
index f48eb6315bbb4b3f8a08101ee8624b4c49471500..292374fb077928cbf361e04d8910a545677df075 100644 (file)
@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
        struct sock *sk;
 
        net = sock_net(in_skb->sk);
-       msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+       msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
        if (!msk)
                goto out_nosk;
 
index c4f9a5ce3815307a6b54d5ac318d3eac98927d00..050eea231528bc762226bbd86d43447011428921 100644 (file)
@@ -1718,9 +1718,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
 
        list_for_each_entry(entry, &pernet->local_addr_list, list) {
                if (addresses_equal(&entry->addr, &addr.addr, true)) {
-                       ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);
-                       if (ret)
-                               return ret;
+                       mptcp_nl_addr_backup(net, &entry->addr, bkup);
 
                        if (bkup)
                                entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
index dbcebf56798fa80a87eb8abd89b35fa13ed85d60..d073b211138287342cf6c2faf6b0fc299c203205 100644 (file)
@@ -528,7 +528,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
 
                sk->sk_shutdown |= RCV_SHUTDOWN;
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-               set_bit(MPTCP_DATA_READY, &msk->flags);
 
                switch (sk->sk_state) {
                case TCP_ESTABLISHED:
@@ -742,10 +741,9 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 
        /* Wake-up the reader only for in-sequence data */
        mptcp_data_lock(sk);
-       if (move_skbs_to_msk(msk, ssk)) {
-               set_bit(MPTCP_DATA_READY, &msk->flags);
+       if (move_skbs_to_msk(msk, ssk))
                sk->sk_data_ready(sk);
-       }
+
        mptcp_data_unlock(sk);
 }
 
@@ -847,7 +845,6 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
                sk->sk_shutdown |= RCV_SHUTDOWN;
 
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-               set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
        }
 
@@ -1759,21 +1756,6 @@ out:
        return copied ? : ret;
 }
 
-static void mptcp_wait_data(struct sock *sk, long *timeo)
-{
-       DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       struct mptcp_sock *msk = mptcp_sk(sk);
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
-       sk_wait_event(sk, timeo,
-                     test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
-
-       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-       remove_wait_queue(sk_sleep(sk), &wait);
-}
-
 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
                                struct msghdr *msg,
                                size_t len, int flags,
@@ -2077,19 +2059,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                }
 
                pr_debug("block timeout %ld", timeo);
-               mptcp_wait_data(sk, &timeo);
-       }
-
-       if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
-           skb_queue_empty(&msk->receive_queue)) {
-               /* entire backlog drained, clear DATA_READY. */
-               clear_bit(MPTCP_DATA_READY, &msk->flags);
-
-               /* .. race-breaker: ssk might have gotten new data
-                * after last __mptcp_move_skbs() returned false.
-                */
-               if (unlikely(__mptcp_move_skbs(msk)))
-                       set_bit(MPTCP_DATA_READY, &msk->flags);
+               sk_wait_data(sk, &timeo, NULL);
        }
 
 out_err:
@@ -2098,9 +2068,9 @@ out_err:
                        tcp_recv_timestamp(msg, sk, &tss);
        }
 
-       pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
-                msk, test_bit(MPTCP_DATA_READY, &msk->flags),
-                skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
+       pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+                msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+                skb_queue_empty(&msk->receive_queue), copied);
        if (!(flags & MSG_PEEK))
                mptcp_rcv_space_adjust(msk, copied);
 
@@ -2368,7 +2338,6 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
        inet_sk_state_store(sk, TCP_CLOSE);
        sk->sk_shutdown = SHUTDOWN_MASK;
        smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-       set_bit(MPTCP_DATA_READY, &msk->flags);
        set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
 
        mptcp_close_wake_up(sk);
@@ -2735,7 +2704,7 @@ cleanup:
        inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
        mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-               bool slow = lock_sock_fast(ssk);
+               bool slow = lock_sock_fast_nested(ssk);
 
                sock_orphan(ssk);
                unlock_sock_fast(ssk, slow);
@@ -3385,8 +3354,14 @@ unlock_fail:
 
 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
 {
-       return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
-              0;
+       /* Concurrent splices from sk_receive_queue into receive_queue will
+        * always show at least one non-empty queue when checked in this order.
+        */
+       if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+           skb_queue_empty_lockless(&msk->receive_queue))
+               return 0;
+
+       return EPOLLIN | EPOLLRDNORM;
 }
 
 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
@@ -3421,7 +3396,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        state = inet_sk_state_load(sk);
        pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
        if (state == TCP_LISTEN)
-               return mptcp_check_readable(msk);
+               return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
 
        if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
                mask |= mptcp_check_readable(msk);
index d3e6fd1615f1f728b4ad51e861afee7037d23b7d..dc984676c5eb1582271645e8363f5115c0560dc4 100644 (file)
@@ -709,7 +709,7 @@ int mptcp_token_new_connect(struct sock *sk);
 void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
                        struct mptcp_sock *msk);
 bool mptcp_token_exists(u32 token);
-struct mptcp_sock *mptcp_token_get_sock(u32 token);
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
 struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
                                         long *s_num);
 void mptcp_token_destroy(struct mptcp_sock *msk);
index 1de7ce883c3776fe9345a45b3b6ec59d5e3644f7..6172f380dfb763b43c6d996b4896215cad9c7d7b 100644 (file)
@@ -86,7 +86,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
        struct mptcp_sock *msk;
        int local_id;
 
-       msk = mptcp_token_get_sock(subflow_req->token);
+       msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
index 37127781aee987055acf380f8696a2326e50971f..7f22526346a7e6667b73599ce27771fc18824cfd 100644 (file)
@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
 
        e->valid = 0;
 
-       msk = mptcp_token_get_sock(e->token);
+       msk = mptcp_token_get_sock(net, e->token);
        if (!msk) {
                spin_unlock_bh(&join_entry_locks[i]);
                return false;
        }
 
-       /* If this fails, the token got re-used in the mean time by another
-        * mptcp socket in a different netns, i.e. entry is outdated.
-        */
-       if (!net_eq(sock_net((struct sock *)msk), net))
-               goto err_put;
-
        subflow_req->remote_nonce = e->remote_nonce;
        subflow_req->local_nonce = e->local_nonce;
        subflow_req->backup = e->backup;
@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
        subflow_req->msk = msk;
        spin_unlock_bh(&join_entry_locks[i]);
        return true;
-
-err_put:
-       spin_unlock_bh(&join_entry_locks[i]);
-       sock_put((struct sock *)msk);
-       return false;
 }
 
 void __init mptcp_join_cookie_init(void)
index a98e554b034fe712d99ac5d9dbba1cbc9c9568b3..e581b341c5beb508e73fc5d7afea05f8cb6c390b 100644 (file)
@@ -231,6 +231,7 @@ found:
 
 /**
  * mptcp_token_get_sock - retrieve mptcp connection sock using its token
+ * @net: restrict to this namespace
  * @token: token of the mptcp connection to retrieve
  *
  * This function returns the mptcp connection structure with the given token.
@@ -238,7 +239,7 @@ found:
  *
  * returns NULL if no connection with the given token value exists.
  */
-struct mptcp_sock *mptcp_token_get_sock(u32 token)
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
 {
        struct hlist_nulls_node *pos;
        struct token_bucket *bucket;
@@ -251,11 +252,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
 again:
        sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
                msk = mptcp_sk(sk);
-               if (READ_ONCE(msk->token) != token)
+               if (READ_ONCE(msk->token) != token ||
+                   !net_eq(sock_net(sk), net))
                        continue;
+
                if (!refcount_inc_not_zero(&sk->sk_refcnt))
                        goto not_found;
-               if (READ_ONCE(msk->token) != token) {
+
+               if (READ_ONCE(msk->token) != token ||
+                   !net_eq(sock_net(sk), net)) {
                        sock_put(sk);
                        goto again;
                }
index e1bd6f0a0676fa1f6ecec42dc9e24f97247b33fe..5d984bec1cd865b78ee19adfe248bac57dc1d24e 100644 (file)
@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
                            GFP_USER);
        KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
        mptcp_token_init_request((struct request_sock *)req);
+       sock_net_set((struct sock *)req, &init_net);
        return req;
 }
 
@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
        KUNIT_ASSERT_EQ(test, 0,
                        mptcp_token_new_request((struct request_sock *)req));
        KUNIT_EXPECT_NE(test, 0, (int)req->token);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
 
        /* cleanup */
        mptcp_token_destroy_request((struct request_sock *)req);
@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
        msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
        KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
        refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
+       sock_net_set((struct sock *)msk, &init_net);
        return msk;
 }
 
@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
                        mptcp_token_new_connect((struct sock *)icsk));
        KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
        KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
        KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
 
        mptcp_token_destroy(msk);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
 }
 
 static void mptcp_token_test_accept(struct kunit *test)
@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
                        mptcp_token_new_request((struct request_sock *)req));
        msk->token = req->token;
        mptcp_token_accept(req, msk);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* this is now a no-op */
        mptcp_token_destroy_request((struct request_sock *)req);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* cleanup */
        mptcp_token_destroy(msk);
@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
 
        /* simulate race on removal */
        refcount_set(&sk->sk_refcnt, 0);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* cleanup */
        mptcp_token_destroy(msk);
index 54395266339d7352ef3fe370d5b057580c0906a6..92a747896f808673f7e395f9017531e7cf078635 100644 (file)
@@ -109,7 +109,7 @@ config NF_CONNTRACK_MARK
 config NF_CONNTRACK_SECMARK
        bool  'Connection tracking security mark support'
        depends on NETWORK_SECMARK
-       default m if NETFILTER_ADVANCED=n
+       default y if NETFILTER_ADVANCED=n
        help
          This option enables security markings to be applied to
          connections.  Typically they are copied to connections from
index 6186358eac7c5a255e48eb9670311694f0d920cd..6e391308431da0279317bb2c2ac807c0c8b3a78d 100644 (file)
@@ -130,11 +130,11 @@ htable_size(u8 hbits)
 {
        size_t hsize;
 
-       /* We must fit both into u32 in jhash and size_t */
+       /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
        if (hbits > 31)
                return 0;
        hsize = jhash_size(hbits);
-       if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
+       if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
            < hsize)
                return 0;
 
index c100c6b112c81ee5cb0eccda5421372756c305ac..2c467c422dc6343a296ccfff9097069440fd66cd 100644 (file)
@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
        int idx;
 
        /* Compute size and mask */
+       if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
+               pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
+               ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+       }
        ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
        ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
 
index c25097092a060bf579edec4ac9638edb4f6fd569..29ec3ef63edc7a6b59b37bd0eb5e95b25e32bf57 100644 (file)
@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
        tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
        tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
        tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+#ifdef CONFIG_IP_VS_DEBUG
+       /* Global sysctls must be ro in non-init netns */
+       if (!net_eq(net, &init_net))
+               tbl[idx++].mode = 0444;
+#endif
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
        if (ipvs->sysctl_hdr == NULL) {
index 94e18fb9690dd44a32829894ed286be66b561d16..770a63103c7a4240b8559a97f707588d569beba8 100644 (file)
@@ -74,10 +74,14 @@ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
 #define GC_SCAN_INTERVAL       (120u * HZ)
 #define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 
-#define MAX_CHAINLEN   64u
+#define MIN_CHAINLEN   8u
+#define MAX_CHAINLEN   (32u - MIN_CHAINLEN)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -188,11 +192,13 @@ seqcount_spinlock_t nf_conntrack_generation __read_mostly;
 static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+                             unsigned int zoneid,
                              const struct net *net)
 {
        struct {
                struct nf_conntrack_man src;
                union nf_inet_addr dst_addr;
+               unsigned int zone;
                u32 net_mix;
                u16 dport;
                u16 proto;
@@ -205,6 +211,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
        /* The direction must be ignored, so handle usable members manually. */
        combined.src = tuple->src;
        combined.dst_addr = tuple->dst.u3;
+       combined.zone = zoneid;
        combined.net_mix = net_hash_mix(net);
        combined.dport = (__force __u16)tuple->dst.u.all;
        combined.proto = tuple->dst.protonum;
@@ -219,15 +226,17 @@ static u32 scale_hash(u32 hash)
 
 static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
+                           unsigned int zoneid,
                            unsigned int size)
 {
-       return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+       return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
 }
 
 static u32 hash_conntrack(const struct net *net,
-                         const struct nf_conntrack_tuple *tuple)
+                         const struct nf_conntrack_tuple *tuple,
+                         unsigned int zoneid)
 {
-       return scale_hash(hash_conntrack_raw(tuple, net));
+       return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
 }
 
 static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
@@ -650,9 +659,11 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
-                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                     nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        clean_from_lists(ct);
@@ -819,8 +830,20 @@ struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
-       return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple, net));
+       unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+       struct nf_conntrack_tuple_hash *thash;
+
+       thash = __nf_conntrack_find_get(net, zone, tuple,
+                                       hash_conntrack_raw(tuple, zone_id, net));
+
+       if (thash)
+               return thash;
+
+       rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+       if (rid != zone_id)
+               return __nf_conntrack_find_get(net, zone, tuple,
+                                              hash_conntrack_raw(tuple, rid, net));
+       return thash;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -842,6 +865,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
+       unsigned int max_chainlen;
        unsigned int chainlen = 0;
        unsigned int sequence;
        int err = -EEXIST;
@@ -852,18 +876,22 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
-                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                     nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
+       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+
        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;
 
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -873,7 +901,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -1103,8 +1131,8 @@ drop:
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+       unsigned int chainlen = 0, sequence, max_chainlen;
        const struct nf_conntrack_zone *zone;
-       unsigned int chainlen = 0, sequence;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
@@ -1133,8 +1161,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        /* We're not in hash table, and we refuse to set up related
@@ -1168,6 +1196,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                goto dying;
        }
 
+       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
@@ -1175,7 +1204,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -1184,7 +1213,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN) {
+               if (chainlen++ > max_chainlen) {
 chaintoolong:
                        nf_ct_add_to_dying_list(ct);
                        NF_CT_STAT_INC(net, chaintoolong);
@@ -1246,7 +1275,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        rcu_read_lock();
  begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
-       hash = __hash_conntrack(net, tuple, hsize);
+       hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
 
        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1687,8 +1716,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
        struct nf_conntrack_tuple_hash *h;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
+       u32 hash, zone_id, rid;
        struct nf_conn *ct;
-       u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, state->pf, protonum, state->net,
@@ -1699,8 +1728,20 @@ resolve_normal_ct(struct nf_conn *tmpl,
 
        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
-       hash = hash_conntrack_raw(&tuple, state->net);
+
+       zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+       hash = hash_conntrack_raw(&tuple, zone_id, state->net);
        h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+
+       if (!h) {
+               rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+               if (zone_id != rid) {
+                       u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
+
+                       h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
+               }
+       }
+
        if (!h) {
                h = init_conntrack(state->net, tmpl, &tuple,
                                   skb, dataoff, hash);
@@ -2225,28 +2266,31 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
        spinlock_t *lockp;
 
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+               struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+               if (hlist_nulls_empty(hslot))
+                       continue;
+
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
-               if (*bucket < nf_conntrack_htable_size) {
-                       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
-                               if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
-                                       continue;
-                               /* All nf_conn objects are added to hash table twice, one
-                                * for original direction tuple, once for the reply tuple.
-                                *
-                                * Exception: In the IPS_NAT_CLASH case, only the reply
-                                * tuple is added (the original tuple already existed for
-                                * a different object).
-                                *
-                                * We only need to call the iterator once for each
-                                * conntrack, so we just use the 'reply' direction
-                                * tuple while iterating.
-                                */
-                               ct = nf_ct_tuplehash_to_ctrack(h);
-                               if (iter(ct, data))
-                                       goto found;
-                       }
+               hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+                               continue;
+                       /* All nf_conn objects are added to hash table twice, one
+                        * for original direction tuple, once for the reply tuple.
+                        *
+                        * Exception: In the IPS_NAT_CLASH case, only the reply
+                        * tuple is added (the original tuple already existed for
+                        * a different object).
+                        *
+                        * We only need to call the iterator once for each
+                        * conntrack, so we just use the 'reply' direction
+                        * tuple while iterating.
+                        */
+                       ct = nf_ct_tuplehash_to_ctrack(h);
+                       if (iter(ct, data))
+                               goto found;
                }
                spin_unlock(lockp);
                local_bh_enable();
@@ -2264,26 +2308,20 @@ found:
 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
                                  void *data, u32 portid, int report)
 {
-       unsigned int bucket = 0, sequence;
+       unsigned int bucket = 0;
        struct nf_conn *ct;
 
        might_sleep();
 
-       for (;;) {
-               sequence = read_seqcount_begin(&nf_conntrack_generation);
-
-               while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
-                       /* Time to push up daises... */
+       mutex_lock(&nf_conntrack_mutex);
+       while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+               /* Time to push up daises... */
 
-                       nf_ct_delete(ct, portid, report);
-                       nf_ct_put(ct);
-                       cond_resched();
-               }
-
-               if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
-                       break;
-               bucket = 0;
+               nf_ct_delete(ct, portid, report);
+               nf_ct_put(ct);
+               cond_resched();
        }
+       mutex_unlock(&nf_conntrack_mutex);
 }
 
 struct iter_data {
@@ -2519,8 +2557,10 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
        if (!hash)
                return -ENOMEM;
 
+       mutex_lock(&nf_conntrack_mutex);
        old_size = nf_conntrack_htable_size;
        if (old_size == hashsize) {
+               mutex_unlock(&nf_conntrack_mutex);
                kvfree(hash);
                return 0;
        }
@@ -2537,12 +2577,16 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
 
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+                       unsigned int zone_id;
+
                        h = hlist_nulls_entry(nf_conntrack_hash[i].first,
                                              struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
+
+                       zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
                        bucket = __hash_conntrack(nf_ct_net(ct),
-                                                 &h->tuple, hashsize);
+                                                 &h->tuple, zone_id, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
@@ -2556,6 +2600,8 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
        nf_conntrack_all_unlock();
        local_bh_enable();
 
+       mutex_unlock(&nf_conntrack_mutex);
+
        synchronize_net();
        kvfree(old_hash);
        return 0;
index 7008961f5cb08b828530bcc23601694362d5a57c..273117683922858eca82f7192f022533422de590 100644 (file)
@@ -150,13 +150,16 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static unsigned int
-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net,
+           const struct nf_conntrack_zone *zone,
+           const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
        struct {
                struct nf_conntrack_man src;
                u32 net_mix;
                u32 protonum;
+               u32 zone;
        } __aligned(SIPHASH_ALIGNMENT) combined;
 
        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
@@ -165,9 +168,13 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 
        /* Original src, to ensure we map it consistently if poss. */
        combined.src = tuple->src;
-       combined.net_mix = net_hash_mix(n);
+       combined.net_mix = net_hash_mix(net);
        combined.protonum = tuple->dst.protonum;
 
+       /* Zone ID can be used provided its valid for both directions */
+       if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
+               combined.zone = zone->id;
+
        hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
 
        return reciprocal_scale(hash, nf_nat_htable_size);
@@ -272,7 +279,7 @@ find_appropriate_src(struct net *net,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range2 *range)
 {
-       unsigned int h = hash_by_src(net, tuple);
+       unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn *ct;
 
        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
@@ -619,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                unsigned int srchash;
                spinlock_t *lock;
 
-               srchash = hash_by_src(net,
+               srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
@@ -788,7 +795,7 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
        unsigned int h;
 
-       h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
index 8e8a65d46345b292c8fcac2fc7fcdfd1be1c5557..acd73f717a0883d791fc351851a98bac4144705f 100644 (file)
@@ -9,8 +9,19 @@
 
 #include <net/netfilter/nf_nat_masquerade.h>
 
+struct masq_dev_work {
+       struct work_struct work;
+       struct net *net;
+       union nf_inet_addr addr;
+       int ifindex;
+       int (*iter)(struct nf_conn *i, void *data);
+};
+
+#define MAX_MASQ_WORKER_COUNT  16
+
 static DEFINE_MUTEX(masq_mutex);
 static unsigned int masq_refcnt __read_mostly;
+static atomic_t masq_worker_count __read_mostly;
 
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
 
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static void iterate_cleanup_work(struct work_struct *work)
+{
+       struct masq_dev_work *w;
+
+       w = container_of(work, struct masq_dev_work, work);
+
+       nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+
+       put_net(w->net);
+       kfree(w);
+       atomic_dec(&masq_worker_count);
+       module_put(THIS_MODULE);
+}
+
+/* Iterate conntrack table in the background and remove conntrack entries
+ * that use the device/address being removed.
+ *
+ * In case too many work items have been queued already or memory allocation
+ * fails iteration is skipped, conntrack entries will time out eventually.
+ */
+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
+                                int ifindex,
+                                int (*iter)(struct nf_conn *i, void *data),
+                                gfp_t gfp_flags)
+{
+       struct masq_dev_work *w;
+
+       if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
+               return;
+
+       net = maybe_get_net(net);
+       if (!net)
+               return;
+
+       if (!try_module_get(THIS_MODULE))
+               goto err_module;
+
+       w = kzalloc(sizeof(*w), gfp_flags);
+       if (w) {
+               /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
+               atomic_inc(&masq_worker_count);
+
+               INIT_WORK(&w->work, iterate_cleanup_work);
+               w->ifindex = ifindex;
+               w->net = net;
+               w->iter = iter;
+               if (addr)
+                       w->addr = *addr;
+               schedule_work(&w->work);
+               return;
+       }
+
+       module_put(THIS_MODULE);
+ err_module:
+       put_net(net);
+}
+
+static int device_cmp(struct nf_conn *i, void *arg)
 {
        const struct nf_conn_nat *nat = nfct_nat(i);
+       const struct masq_dev_work *w = arg;
 
        if (!nat)
                return 0;
-       return nat->masq_index == (int)(long)ifindex;
+       return nat->masq_index == w->ifindex;
 }
 
 static int masq_device_event(struct notifier_block *this,
@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
                 * and forget them.
                 */
 
-               nf_ct_iterate_cleanup_net(net, device_cmp,
-                                         (void *)(long)dev->ifindex, 0, 0);
+               nf_nat_masq_schedule(net, NULL, dev->ifindex,
+                                    device_cmp, GFP_KERNEL);
        }
 
        return NOTIFY_DONE;
@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
 
 static int inet_cmp(struct nf_conn *ct, void *ptr)
 {
-       struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-       struct net_device *dev = ifa->ifa_dev->dev;
        struct nf_conntrack_tuple *tuple;
+       struct masq_dev_work *w = ptr;
 
-       if (!device_cmp(ct, (void *)(long)dev->ifindex))
+       if (!device_cmp(ct, ptr))
                return 0;
 
        tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-       return ifa->ifa_address == tuple->dst.u3.ip;
+       return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
 }
 
 static int masq_inet_event(struct notifier_block *this,
                           unsigned long event,
                           void *ptr)
 {
-       struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
-       struct net *net = dev_net(idev->dev);
+       const struct in_ifaddr *ifa = ptr;
+       const struct in_device *idev;
+       const struct net_device *dev;
+       union nf_inet_addr addr;
+
+       if (event != NETDEV_DOWN)
+               return NOTIFY_DONE;
 
        /* The masq_dev_notifier will catch the case of the device going
         * down.  So if the inetdev is dead and being destroyed we have
         * no work to do.  Otherwise this is an individual address removal
         * and we have to perform the flush.
         */
+       idev = ifa->ifa_dev;
        if (idev->dead)
                return NOTIFY_DONE;
 
-       if (event == NETDEV_DOWN)
-               nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+       memset(&addr, 0, sizeof(addr));
+
+       addr.ip = ifa->ifa_address;
+
+       dev = idev->dev;
+       nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+                            inet_cmp, GFP_KERNEL);
 
        return NOTIFY_DONE;
 }
@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-static atomic_t v6_worker_count __read_mostly;
-
 static int
 nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
                       const struct in6_addr *daddr, unsigned int srcprefs,
@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
 
-struct masq_dev_work {
-       struct work_struct work;
-       struct net *net;
-       struct in6_addr addr;
-       int ifindex;
-};
-
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
-       struct masq_dev_work *w = (struct masq_dev_work *)work;
-       struct nf_conntrack_tuple *tuple;
-
-       if (!device_cmp(ct, (void *)(long)w->ifindex))
-               return 0;
-
-       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
-       return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
-       struct masq_dev_work *w;
-
-       w = container_of(work, struct masq_dev_work, work);
-
-       nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
-
-       put_net(w->net);
-       kfree(w);
-       atomic_dec(&v6_worker_count);
-       module_put(THIS_MODULE);
-}
-
 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
  *
  * Defer it to the system workqueue.
@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
 {
        struct inet6_ifaddr *ifa = ptr;
        const struct net_device *dev;
-       struct masq_dev_work *w;
-       struct net *net;
+       union nf_inet_addr addr;
 
-       if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+       if (event != NETDEV_DOWN)
                return NOTIFY_DONE;
 
        dev = ifa->idev->dev;
-       net = maybe_get_net(dev_net(dev));
-       if (!net)
-               return NOTIFY_DONE;
 
-       if (!try_module_get(THIS_MODULE))
-               goto err_module;
+       memset(&addr, 0, sizeof(addr));
 
-       w = kmalloc(sizeof(*w), GFP_ATOMIC);
-       if (w) {
-               atomic_inc(&v6_worker_count);
-
-               INIT_WORK(&w->work, iterate_cleanup_work);
-               w->ifindex = dev->ifindex;
-               w->net = net;
-               w->addr = ifa->addr;
-               schedule_work(&w->work);
+       addr.in6 = ifa->addr;
 
-               return NOTIFY_DONE;
-       }
-
-       module_put(THIS_MODULE);
- err_module:
-       put_net(net);
+       nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
+                            GFP_ATOMIC);
        return NOTIFY_DONE;
 }
 
index 081437dd75b7eba8bb71ac4b9ee46a6ab5c7fd40..c0851fec11d46532f87423e1ab38988bf4418172 100644 (file)
@@ -780,6 +780,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 {
        struct nftables_pernet *nft_net;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -790,8 +791,11 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                       event, 0, ctx->family, ctx->table);
+                                       event, flags, ctx->family, ctx->table);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -1563,6 +1567,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
        struct nftables_pernet *nft_net;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -1573,8 +1578,11 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                       event, 0, ctx->family, ctx->table,
+                                       event, flags, ctx->family, ctx->table,
                                        ctx->chain);
        if (err < 0) {
                kfree_skb(skb);
@@ -2866,8 +2874,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
                                    u32 flags, int family,
                                    const struct nft_table *table,
                                    const struct nft_chain *chain,
-                                   const struct nft_rule *rule,
-                                   const struct nft_rule *prule)
+                                   const struct nft_rule *rule, u64 handle)
 {
        struct nlmsghdr *nlh;
        const struct nft_expr *expr, *next;
@@ -2887,9 +2894,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
                         NFTA_RULE_PAD))
                goto nla_put_failure;
 
-       if (event != NFT_MSG_DELRULE && prule) {
-               if (nla_put_be64(skb, NFTA_RULE_POSITION,
-                                cpu_to_be64(prule->handle),
+       if (event != NFT_MSG_DELRULE && handle) {
+               if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(handle),
                                 NFTA_RULE_PAD))
                        goto nla_put_failure;
        }
@@ -2925,7 +2931,10 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
                                  const struct nft_rule *rule, int event)
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+       const struct nft_rule *prule;
        struct sk_buff *skb;
+       u64 handle = 0;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -2936,9 +2945,20 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (event == NFT_MSG_NEWRULE &&
+           !list_is_first(&rule->list, &ctx->chain->rules) &&
+           !list_is_last(&rule->list, &ctx->chain->rules)) {
+               prule = list_prev_entry(rule, list);
+               handle = prule->handle;
+       }
+       if (ctx->flags & (NLM_F_APPEND | NLM_F_REPLACE))
+               flags |= NLM_F_APPEND;
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                      event, 0, ctx->family, ctx->table,
-                                      ctx->chain, rule, NULL);
+                                      event, flags, ctx->family, ctx->table,
+                                      ctx->chain, rule, handle);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -2964,6 +2984,7 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        const struct nft_rule *rule, *prule;
        unsigned int s_idx = cb->args[0];
+       u64 handle;
 
        prule = NULL;
        list_for_each_entry_rcu(rule, &chain->rules, list) {
@@ -2975,12 +2996,17 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
                        memset(&cb->args[1], 0,
                                        sizeof(cb->args) - sizeof(cb->args[0]));
                }
+               if (prule)
+                       handle = prule->handle;
+               else
+                       handle = 0;
+
                if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq,
                                        NFT_MSG_NEWRULE,
                                        NLM_F_MULTI | NLM_F_APPEND,
                                        table->family,
-                                       table, chain, rule, prule) < 0)
+                                       table, chain, rule, handle) < 0)
                        return 1;
 
                nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3143,7 +3169,7 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
 
        err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
                                       info->nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
-                                      family, table, chain, rule, NULL);
+                                      family, table, chain, rule, 0);
        if (err < 0)
                goto err_fill_rule_info;
 
@@ -3403,17 +3429,15 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
        }
 
        if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
+               err = nft_delrule(&ctx, old_rule);
+               if (err < 0)
+                       goto err_destroy_flow_rule;
+
                trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
                if (trans == NULL) {
                        err = -ENOMEM;
                        goto err_destroy_flow_rule;
                }
-               err = nft_delrule(&ctx, old_rule);
-               if (err < 0) {
-                       nft_trans_destroy(trans);
-                       goto err_destroy_flow_rule;
-               }
-
                list_add_tail_rcu(&rule->list, &old_rule->list);
        } else {
                trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
@@ -3943,8 +3967,9 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
                                 gfp_t gfp_flags)
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
-       struct sk_buff *skb;
        u32 portid = ctx->portid;
+       struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -3955,7 +3980,10 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_set(skb, ctx, set, event, 0);
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
+       err = nf_tables_fill_set(skb, ctx, set, event, flags);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -4336,7 +4364,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        if (ops->privsize != NULL)
                size = ops->privsize(nla, &desc);
        alloc_size = sizeof(*set) + size + udlen;
-       if (alloc_size < size)
+       if (alloc_size < size || alloc_size > INT_MAX)
                return -ENOMEM;
        set = kvzalloc(alloc_size, GFP_KERNEL);
        if (!set)
@@ -5231,12 +5259,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
 static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
                                     const struct nft_set *set,
                                     const struct nft_set_elem *elem,
-                                    int event, u16 flags)
+                                    int event)
 {
        struct nftables_pernet *nft_net;
        struct net *net = ctx->net;
        u32 portid = ctx->portid;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
@@ -5246,6 +5275,9 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
                                          set, elem);
        if (err < 0) {
@@ -6921,7 +6953,7 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
 
 void nft_obj_notify(struct net *net, const struct nft_table *table,
                    struct nft_object *obj, u32 portid, u32 seq, int event,
-                   int family, int report, gfp_t gfp)
+                   u16 flags, int family, int report, gfp_t gfp)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct sk_buff *skb;
@@ -6946,8 +6978,9 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family,
-                                     table, obj, false);
+       err = nf_tables_fill_obj_info(skb, net, portid, seq, event,
+                                     flags & (NLM_F_CREATE | NLM_F_EXCL),
+                                     family, table, obj, false);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -6964,7 +6997,7 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
                                 struct nft_object *obj, int event)
 {
        nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
-                      ctx->family, ctx->report, GFP_KERNEL);
+                      ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
 }
 
 /*
@@ -7745,6 +7778,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -7755,8 +7789,11 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
-                                           ctx->seq, event, 0,
+                                           ctx->seq, event, flags,
                                            ctx->family, flowtable, hook_list);
        if (err < 0) {
                kfree_skb(skb);
@@ -8634,7 +8671,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nft_setelem_activate(net, te->set, &te->elem);
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
-                                                NFT_MSG_NEWSETELEM, 0);
+                                                NFT_MSG_NEWSETELEM);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELSETELEM:
@@ -8642,7 +8679,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
-                                                NFT_MSG_DELSETELEM, 0);
+                                                NFT_MSG_DELSETELEM);
                        nft_setelem_remove(net, te->set, &te->elem);
                        if (!nft_setelem_is_catchall(te->set, &te->elem)) {
                                atomic_dec(&te->set->nelems);
@@ -9599,7 +9636,6 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
                table->use--;
                nf_tables_chain_destroy(&ctx);
        }
-       list_del(&table->list);
        nf_tables_table_destroy(&ctx);
 }
 
@@ -9612,6 +9648,8 @@ static void __nft_release_tables(struct net *net)
                if (nft_table_has_owner(table))
                        continue;
 
+               list_del(&table->list);
+
                __nft_release_table(net, table);
        }
 }
@@ -9619,31 +9657,38 @@ static void __nft_release_tables(struct net *net)
 static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
+       struct nft_table *table, *to_delete[8];
        struct nftables_pernet *nft_net;
        struct netlink_notify *n = ptr;
-       struct nft_table *table, *nt;
        struct net *net = n->net;
-       bool release = false;
+       unsigned int deleted;
+       bool restart = false;
 
        if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
                return NOTIFY_DONE;
 
        nft_net = nft_pernet(net);
+       deleted = 0;
        mutex_lock(&nft_net->commit_mutex);
+again:
        list_for_each_entry(table, &nft_net->tables, list) {
                if (nft_table_has_owner(table) &&
                    n->portid == table->nlpid) {
                        __nft_release_hook(net, table);
-                       release = true;
+                       list_del_rcu(&table->list);
+                       to_delete[deleted++] = table;
+                       if (deleted >= ARRAY_SIZE(to_delete))
+                               break;
                }
        }
-       if (release) {
+       if (deleted) {
+               restart = deleted >= ARRAY_SIZE(to_delete);
                synchronize_rcu();
-               list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
-                       if (nft_table_has_owner(table) &&
-                           n->portid == table->nlpid)
-                               __nft_release_table(net, table);
-               }
+               while (deleted)
+                       __nft_release_table(net, to_delete[--deleted]);
+
+               if (restart)
+                       goto again;
        }
        mutex_unlock(&nft_net->commit_mutex);
 
index 5b02408a920bf83a0bdcf2a0ab1d09ede0791fa6..3ced0eb6b7c3bf2c979dce2d349a6d26029a158a 100644 (file)
@@ -342,12 +342,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
                return;
        }
 
-       /* UNREGISTER events are also happening on netns exit.
-        *
-        * Although nf_tables core releases all tables/chains, only this event
-        * handler provides guarantee that hook->ops.dev is still accessible,
-        * so we cannot skip exiting net namespaces.
-        */
        __nft_release_basechain(ctx);
 }
 
@@ -366,6 +360,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
            event != NETDEV_CHANGENAME)
                return NOTIFY_DONE;
 
+       if (!check_net(ctx.net))
+               return NOTIFY_DONE;
+
        nft_net = nft_pernet(ctx.net);
        mutex_lock(&nft_net->commit_mutex);
        list_for_each_entry(table, &nft_net->tables, list) {
index 272bcdb1392dfc830e9bdd3b0a6f54d7b19ca954..f69cc73c581305dfb59963ce15d8716ea7bb52fb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
 
 /* Used for matches where *info is larger than X byte */
 #define NFT_MATCH_LARGE_THRESH 192
@@ -257,8 +258,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        nft_compat_wait_for_destructors();
 
        ret = xt_check_target(&par, size, proto, inv);
-       if (ret < 0)
+       if (ret < 0) {
+               if (ret == -ENOENT) {
+                       const char *modname = NULL;
+
+                       if (strcmp(target->name, "LOG") == 0)
+                               modname = "nf_log_syslog";
+                       else if (strcmp(target->name, "NFLOG") == 0)
+                               modname = "nfnetlink_log";
+
+                       if (modname &&
+                           nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
+                               return -EAGAIN;
+               }
+
                return ret;
+       }
 
        /* The standard target cannot be used */
        if (!target->target)
index 0363f533a42b8e8ac8b19bd10c197cabc206ea54..c4d1389f7185aef1429f2864eec8a673bb49b57e 100644 (file)
@@ -60,7 +60,7 @@ static void nft_quota_obj_eval(struct nft_object *obj,
        if (overquota &&
            !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
                nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
-                              NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
+                              NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC);
 }
 
 static int nft_quota_do_init(const struct nlattr * const tb[],
index 7b2f359bfce46160acc3c29e7a9b4b68bbe3b731..2f7cf5ecebf4f3b66f817e6e3d17e5114924189f 100644 (file)
@@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 {
        int ret;
 
-       info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+       info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
        if (!info->timer) {
                ret = -ENOMEM;
                goto out;
index 2ff75f7637b09dddf0e9b91ccd22b9b41dc6d05d..f39244f9c0ed94348b1d16ff468c1f78da1a0766 100644 (file)
@@ -44,6 +44,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int log_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_log_info *loginfo = par->targinfo;
+       int ret;
 
        if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
                return -EINVAL;
@@ -58,7 +59,14 @@ static int log_tg_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        }
 
-       return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       if (ret != 0 && !par->nft_compat) {
+               request_module("%s", "nf_log_syslog");
+
+               ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       }
+
+       return ret;
 }
 
 static void log_tg_destroy(const struct xt_tgdtor_param *par)
index fb579320805984d6c51d2d24bbcfd71f737bea33..e660c3710a10968909b98b5236536996cbb8462b 100644 (file)
@@ -42,13 +42,21 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int nflog_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_nflog_info *info = par->targinfo;
+       int ret;
 
        if (info->flags & ~XT_NFLOG_MASK)
                return -EINVAL;
        if (info->prefix[sizeof(info->prefix) - 1] != '\0')
                return -EINVAL;
 
-       return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       if (ret != 0 && !par->nft_compat) {
+               request_module("%s", "nfnetlink_log");
+
+               ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       }
+
+       return ret;
 }
 
 static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
index 24b7cf447bc55b21e74cd82ba19528a575956981..ada47e59647a03e69f64a2ea935b5e8ec9785fe5 100644 (file)
@@ -594,7 +594,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
        /* We need to ensure that the socket is hashed and visible. */
        smp_wmb();
-       nlk_sk(sk)->bound = portid;
+       /* Paired with lockless reads from netlink_bind(),
+        * netlink_connect() and netlink_sendmsg().
+        */
+       WRITE_ONCE(nlk_sk(sk)->bound, portid);
 
 err:
        release_sock(sk);
@@ -1012,7 +1015,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        if (nlk->ngroups < BITS_PER_LONG)
                groups &= (1UL << nlk->ngroups) - 1;
 
-       bound = nlk->bound;
+       /* Paired with WRITE_ONCE() in netlink_insert() */
+       bound = READ_ONCE(nlk->bound);
        if (bound) {
                /* Ensure nlk->portid is up-to-date. */
                smp_rmb();
@@ -1098,8 +1102,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 
        /* No need for barriers here as we return to user-space without
         * using any of the bound attributes.
+        * Paired with WRITE_ONCE() in netlink_insert().
         */
-       if (!nlk->bound)
+       if (!READ_ONCE(nlk->bound))
                err = netlink_autobind(sock);
 
        if (err == 0) {
@@ -1888,7 +1893,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                dst_group = nlk->dst_group;
        }
 
-       if (!nlk->bound) {
+       /* Paired with WRITE_ONCE() in netlink_insert() */
+       if (!READ_ONCE(nlk->bound)) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
index 6024fad905ff04f98d3788efe15815c2ea9d7526..dda323e0a4730b2eca2a7c5516ee01eb1cdd7c6e 100644 (file)
@@ -60,6 +60,9 @@ int nfc_proto_register(const struct nfc_protocol *nfc_proto)
                proto_tab[nfc_proto->id] = nfc_proto;
        write_unlock(&proto_tab_lock);
 
+       if (rc)
+               proto_unregister(nfc_proto->proto);
+
        return rc;
 }
 EXPORT_SYMBOL(nfc_proto_register);
index fefc03674f4f8333bd657d102409f5f2161ccbc2..d63d2e5dc60c97e46ae977674e113b019169682b 100644 (file)
@@ -277,6 +277,7 @@ int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
 static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
 {
        struct digital_tg_mdaa_params *params;
+       int rc;
 
        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
@@ -291,8 +292,12 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
        get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
        params->sc = DIGITAL_SENSF_FELICA_SC;
 
-       return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
-                               500, digital_tg_recv_atr_req, NULL);
+       rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+                             500, digital_tg_recv_atr_req, NULL);
+       if (rc)
+               kfree(params);
+
+       return rc;
 }
 
 static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
index 84d2345c75a3f2a9e9b89dade25421b1d4a8acab..3adf4589852affc6cd3665857ac0eba0ff21b641 100644 (file)
@@ -465,8 +465,12 @@ static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
        skb_put_u8(skb, sel_cmd);
        skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
 
-       return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
-                                  target);
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+                                target);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
 }
 
 static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
index a2e72c0038050dee96fbb865b3df504d31338249..b911ab78bed9aa14e30ea670e978987d7f827bd1 100644 (file)
@@ -334,6 +334,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
                                                         ndev->cur_conn_id);
                if (conn_info) {
                        list_del(&conn_info->list);
+                       if (conn_info == ndev->rf_conn_info)
+                               ndev->rf_conn_info = NULL;
                        devm_kfree(&ndev->nfc_dev->dev, conn_info);
                }
        }
index ad9df0cb4b98dda4e2df9ffd5ad57fee7aeb3710..90866ae45573a7ec3ff783c070555e56df126c3c 100644 (file)
@@ -960,6 +960,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tmpl = p->tmpl;
 
        tcf_lastuse_update(&c->tcf_tm);
+       tcf_action_update_bstats(&c->common, skb);
 
        if (clear) {
                qdisc_skb_cb(skb)->post_ct = false;
@@ -1049,7 +1050,6 @@ out_push:
 
        qdisc_skb_cb(skb)->post_ct = true;
 out_clear:
-       tcf_action_update_bstats(&c->common, skb);
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
        return retval;
index 23b21253b3c3306a28f49ef19df77d48f265dbf6..eb6345a027e1302ee4ee9407a87ddc7db9636e86 100644 (file)
@@ -2188,18 +2188,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 
        arg->count = arg->skip;
 
+       rcu_read_lock();
        idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
                /* don't return filters that are being deleted */
                if (!refcount_inc_not_zero(&f->refcnt))
                        continue;
+               rcu_read_unlock();
+
                if (arg->fn(tp, f, arg) < 0) {
                        __fl_put(f);
                        arg->stop = 1;
+                       rcu_read_lock();
                        break;
                }
                __fl_put(f);
                arg->count++;
+               rcu_read_lock();
        }
+       rcu_read_unlock();
        arg->cookie = id;
 }
 
index 5e90e9b160e3d04033594cebde5e2cf3733317b9..12f39a2dffd47542dd699d1b04b97d85c113adab 100644 (file)
@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
                return stab;
        }
 
+       if (s->size_log > STAB_SIZE_LOG_MAX ||
+           s->cell_log > STAB_SIZE_LOG_MAX) {
+               NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
+               return ERR_PTR(-EINVAL);
+       }
+
        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);
index a579a4131d22d771c9766f5ad6cdb16ece3034c0..e1040421b79797fefaa26b8d7d3f44b91896e1de 100644 (file)
@@ -233,6 +233,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
        if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
                return 0;
 
+       if (!q->ops->change)
+               return 0;
+
        nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (nla) {
                nla->nla_type = RTM_NEWQDISC;
index 8766ab5b8788042df03b0b8cdb9e35734ef1a8a7..5eb3b1b7ae5e7886789c010548e43bcff6cf4172 100644 (file)
@@ -529,22 +529,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);
                        struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
-                       struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
-                       struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 
                        spin_lock_bh(qdisc_lock(qdisc));
+
                        if (qdisc_is_percpu_stats(qdisc)) {
-                               cpu_bstats = qdisc->cpu_bstats;
-                               cpu_qstats = qdisc->cpu_qstats;
+                               qlen = qdisc_qlen_sum(qdisc);
+
+                               __gnet_stats_copy_basic(NULL, &bstats,
+                                                       qdisc->cpu_bstats,
+                                                       &qdisc->bstats);
+                               __gnet_stats_copy_queue(&qstats,
+                                                       qdisc->cpu_qstats,
+                                                       &qdisc->qstats,
+                                                       qlen);
+                       } else {
+                               qlen            += qdisc->q.qlen;
+                               bstats.bytes    += qdisc->bstats.bytes;
+                               bstats.packets  += qdisc->bstats.packets;
+                               qstats.backlog  += qdisc->qstats.backlog;
+                               qstats.drops    += qdisc->qstats.drops;
+                               qstats.requeues += qdisc->qstats.requeues;
+                               qstats.overlimits += qdisc->qstats.overlimits;
                        }
-
-                       qlen = qdisc_qlen_sum(qdisc);
-                       __gnet_stats_copy_basic(NULL, &sch->bstats,
-                                               cpu_bstats, &qdisc->bstats);
-                       __gnet_stats_copy_queue(&sch->qstats,
-                                               cpu_qstats,
-                                               &qdisc->qstats,
-                                               qlen);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
 
index 1ab2fc933a214d04dfff763d2c5de65f4a67374a..b9fd18d986464f317a9fb7ce709a9728ffb75751 100644 (file)
@@ -1641,6 +1641,10 @@ static void taprio_destroy(struct Qdisc *sch)
        list_del(&q->taprio_list);
        spin_unlock(&taprio_list_lock);
 
+       /* Note that taprio_reset() might not be called if an error
+        * happens in qdisc_create(), after taprio_init() has been called.
+        */
+       hrtimer_cancel(&q->advance_timer);
 
        taprio_disable_offload(dev, q, NULL);
 
index 5ef86fdb11769d9c8a32219c5c7361fc34217b02..1f1786021d9c81fb0ebd27b027fc479faf1e72f5 100644 (file)
@@ -702,7 +702,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
                ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
 
                /* Break out if chunk length is less then minimal. */
-               if (ntohs(ch->length) < sizeof(_ch))
+               if (!ch || ntohs(ch->length) < sizeof(_ch))
                        break;
 
                ch_end = offset + SCTP_PAD4(ntohs(ch->length));
index b8fa8f1a727704df07e78c0b72e77a4ecdd76b76..c7503fd6491593fc749b9af182c646a39ed2ddbc 100644 (file)
@@ -3697,7 +3697,7 @@ struct sctp_chunk *sctp_make_strreset_req(
        outlen = (sizeof(outreq) + stream_len) * out;
        inlen = (sizeof(inreq) + stream_len) * in;
 
-       retval = sctp_make_reconf(asoc, outlen + inlen);
+       retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen));
        if (!retval)
                return NULL;
 
index f23f558054a7cba4fa29e430bbd6729074a55611..99acd337ba90d828c6f11099fe2a54ac91055eaa 100644 (file)
@@ -150,9 +150,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 
 again:
        link = conn->lnk;
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
        if (rc)
-               return rc;
+               goto put_out;
 
        spin_lock_bh(&conn->send_lock);
        if (link != conn->lnk) {
@@ -160,6 +162,7 @@ again:
                spin_unlock_bh(&conn->send_lock);
                smc_wr_tx_put_slot(link,
                                   (struct smc_wr_tx_pend_priv *)pend);
+               smc_wr_tx_link_put(link);
                if (again)
                        return -ENOLINK;
                again = true;
@@ -167,6 +170,8 @@ again:
        }
        rc = smc_cdc_msg_send(conn, wr_buf, pend);
        spin_unlock_bh(&conn->send_lock);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
index 8280c938be80102afa7c6770dc4946d9fed0f053..d2206743dc714863472f23a0cf70166569491ca0 100644 (file)
@@ -949,7 +949,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
                to_lnk = &lgr->lnk[i];
                break;
        }
-       if (!to_lnk) {
+       if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
                smc_lgr_terminate_sched(lgr);
                return NULL;
        }
@@ -981,24 +981,26 @@ again:
                read_unlock_bh(&lgr->conns_lock);
                /* pre-fetch buffer outside of send_lock, might sleep */
                rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
-               if (rc) {
-                       smcr_link_down_cond_sched(to_lnk);
-                       return NULL;
-               }
+               if (rc)
+                       goto err_out;
                /* avoid race with smcr_tx_sndbuf_nonempty() */
                spin_lock_bh(&conn->send_lock);
                smc_switch_link_and_count(conn, to_lnk);
                rc = smc_switch_cursor(smc, pend, wr_buf);
                spin_unlock_bh(&conn->send_lock);
                sock_put(&smc->sk);
-               if (rc) {
-                       smcr_link_down_cond_sched(to_lnk);
-                       return NULL;
-               }
+               if (rc)
+                       goto err_out;
                goto again;
        }
        read_unlock_bh(&lgr->conns_lock);
+       smc_wr_tx_link_put(to_lnk);
        return to_lnk;
+
+err_out:
+       smcr_link_down_cond_sched(to_lnk);
+       smc_wr_tx_link_put(to_lnk);
+       return NULL;
 }
 
 static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
index 2e7560eba9812635c22f18f41aafc38b063dd02a..72f4b72eb1753a899f164e10aa1e75620c7e5bfa 100644 (file)
@@ -383,9 +383,11 @@ int smc_llc_send_confirm_link(struct smc_link *link,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
        memset(confllc, 0, sizeof(*confllc));
        confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
@@ -402,6 +404,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
        confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -415,9 +419,11 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
        struct smc_link *link;
        int i, rc, rtok_ix;
 
+       if (!smc_wr_tx_link_hold(send_link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
        memset(rkeyllc, 0, sizeof(*rkeyllc));
        rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
@@ -444,6 +450,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
                (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
        /* send llc message */
        rc = smc_wr_tx_send(send_link, pend);
+put_out:
+       smc_wr_tx_link_put(send_link);
        return rc;
 }
 
@@ -456,9 +464,11 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
        memset(rkeyllc, 0, sizeof(*rkeyllc));
        rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
@@ -467,6 +477,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
        rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -480,9 +492,11 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        addllc = (struct smc_llc_msg_add_link *)wr_buf;
 
        memset(addllc, 0, sizeof(*addllc));
@@ -504,6 +518,8 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
        }
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -517,9 +533,11 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        delllc = (struct smc_llc_msg_del_link *)wr_buf;
 
        memset(delllc, 0, sizeof(*delllc));
@@ -536,6 +554,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
        delllc->reason = htonl(reason);
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -547,9 +567,11 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        testllc = (struct smc_llc_msg_test_link *)wr_buf;
        memset(testllc, 0, sizeof(*testllc));
        testllc->hd.common.type = SMC_LLC_TEST_LINK;
@@ -557,6 +579,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
        memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -567,13 +591,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       if (!smc_link_usable(link))
+       if (!smc_wr_tx_link_hold(link))
                return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
-       return smc_wr_tx_send(link, pend);
+       rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 /* schedule an llc send on link, may wait for buffers,
@@ -586,13 +613,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       if (!smc_link_usable(link))
+       if (!smc_wr_tx_link_hold(link))
                return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
-       return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+       rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 /********************************* receive ***********************************/
@@ -672,9 +702,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
        struct smc_buf_desc *rmb;
        u8 n;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
        memset(addc_llc, 0, sizeof(*addc_llc));
 
@@ -706,7 +738,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
        addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
        if (lgr->role == SMC_CLNT)
                addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
-       return smc_wr_tx_send(link, pend);
+       rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
index c79361dfcdfb9f21d9541a7249b84227ca603b38..738a4a99c82797b7b5d28afaba5208135e1983ad 100644 (file)
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        struct smc_link *link = conn->lnk;
@@ -505,8 +505,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!link || !smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
        if (rc < 0) {
+               smc_wr_tx_link_put(link);
                if (rc == -EBUSY) {
                        struct smc_sock *smc =
                                container_of(conn, struct smc_sock, conn);
@@ -547,22 +550,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 
 out_unlock:
        spin_unlock_bh(&conn->send_lock);
-       return rc;
-}
-
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
-{
-       struct smc_link *link = conn->lnk;
-       int rc = -ENOLINK;
-
-       if (!link)
-               return rc;
-
-       atomic_inc(&link->wr_tx_refcnt);
-       if (smc_link_usable(link))
-               rc = _smcr_tx_sndbuf_nonempty(conn);
-       if (atomic_dec_and_test(&link->wr_tx_refcnt))
-               wake_up_all(&link->wr_tx_wait);
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
index 423b8709f1c9e4fd2462dc2bb929ebc24e01f454..2bc626f230a56dca33fb5617972443c8aecf98ee 100644 (file)
@@ -60,6 +60,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
        atomic_long_set(wr_tx_id, val);
 }
 
+static inline bool smc_wr_tx_link_hold(struct smc_link *link)
+{
+       if (!smc_link_usable(link))
+               return false;
+       atomic_inc(&link->wr_tx_refcnt);
+       return true;
+}
+
+static inline void smc_wr_tx_link_put(struct smc_link *link)
+{
+       if (atomic_dec_and_test(&link->wr_tx_refcnt))
+               wake_up_all(&link->wr_tx_wait);
+}
+
 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
 {
        wake_up_all(&lnk->wr_tx_wait);
index 3e776e3dff913f6a723ecbae6419ed2a595674ab..1f2817195549be6c39ce13cf8c6bfb100fd556f0 100644 (file)
@@ -645,7 +645,7 @@ static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto ok;
-       } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
+       } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                goto toolow;
        }
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
index 92345c9bb60cc3b469e7cf50effe122b81c7bb89..89f9e85ae970d410774e720faa0987c069aafdab 100644 (file)
@@ -608,20 +608,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
 
 static void init_peercred(struct sock *sk)
 {
-       put_pid(sk->sk_peer_pid);
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       const struct cred *old_cred;
+       struct pid *old_pid;
+
+       spin_lock(&sk->sk_peer_lock);
+       old_pid = sk->sk_peer_pid;
+       old_cred = sk->sk_peer_cred;
        sk->sk_peer_pid  = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
+       spin_unlock(&sk->sk_peer_lock);
+
+       put_pid(old_pid);
+       put_cred(old_cred);
 }
 
 static void copy_peercred(struct sock *sk, struct sock *peersk)
 {
-       put_pid(sk->sk_peer_pid);
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       const struct cred *old_cred;
+       struct pid *old_pid;
+
+       if (sk < peersk) {
+               spin_lock(&sk->sk_peer_lock);
+               spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock(&peersk->sk_peer_lock);
+               spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+       }
+       old_pid = sk->sk_peer_pid;
+       old_cred = sk->sk_peer_cred;
        sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+       spin_unlock(&sk->sk_peer_lock);
+       spin_unlock(&peersk->sk_peer_lock);
+
+       put_pid(old_pid);
+       put_cred(old_cred);
 }
 
 static int unix_listen(struct socket *sock, int backlog)
@@ -806,7 +828,7 @@ static void unix_unhash(struct sock *sk)
 }
 
 struct proto unix_dgram_proto = {
-       .name                   = "UNIX-DGRAM",
+       .name                   = "UNIX",
        .owner                  = THIS_MODULE,
        .obj_size               = sizeof(struct unix_sock),
        .close                  = unix_close,
@@ -828,20 +850,25 @@ struct proto unix_stream_proto = {
 
 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 {
-       struct sock *sk = NULL;
        struct unix_sock *u;
+       struct sock *sk;
+       int err;
 
        atomic_long_inc(&unix_nr_socks);
-       if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
-               goto out;
+       if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
+               err = -ENFILE;
+               goto err;
+       }
 
        if (type == SOCK_STREAM)
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
        else /*dgram and  seqpacket */
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 
-       if (!sk)
-               goto out;
+       if (!sk) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        sock_init_data(sock, sk);
 
@@ -861,20 +888,23 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        memset(&u->scm_stat, 0, sizeof(struct scm_stat));
        unix_insert_socket(unix_sockets_unbound(sk), sk);
-out:
-       if (sk == NULL)
-               atomic_long_dec(&unix_nr_socks);
-       else {
-               local_bh_disable();
-               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-               local_bh_enable();
-       }
+
+       local_bh_disable();
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       local_bh_enable();
+
        return sk;
+
+err:
+       atomic_long_dec(&unix_nr_socks);
+       return ERR_PTR(err);
 }
 
 static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
 {
+       struct sock *sk;
+
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;
 
@@ -901,7 +931,11 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
                return -ESOCKTNOSUPPORT;
        }
 
-       return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;
+       sk = unix_create1(net, sock, kern, sock->type);
+       if (IS_ERR(sk))
+               return PTR_ERR(sk);
+
+       return 0;
 }
 
 static int unix_release(struct socket *sock)
@@ -1314,12 +1348,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
           we will have to recheck all again in any case.
         */
 
-       err = -ENOMEM;
-
        /* create new sock for complete connection */
        newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
-       if (newsk == NULL)
+       if (IS_ERR(newsk)) {
+               err = PTR_ERR(newsk);
+               newsk = NULL;
                goto out;
+       }
+
+       err = -ENOMEM;
 
        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
@@ -2845,6 +2882,9 @@ static int unix_shutdown(struct socket *sock, int mode)
 
        unix_state_lock(sk);
        sk->sk_shutdown |= mode;
+       if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+           mode == SHUTDOWN_MASK)
+               sk->sk_state = TCP_CLOSE;
        other = unix_peer(sk);
        if (other)
                sock_hold(other);
@@ -2867,12 +2907,10 @@ static int unix_shutdown(struct socket *sock, int mode)
                other->sk_shutdown |= peer_mode;
                unix_state_unlock(other);
                other->sk_state_change(other);
-               if (peer_mode == SHUTDOWN_MASK) {
+               if (peer_mode == SHUTDOWN_MASK)
                        sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
-                       other->sk_state = TCP_CLOSE;
-               } else if (peer_mode & RCV_SHUTDOWN) {
+               else if (peer_mode & RCV_SHUTDOWN)
                        sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-               }
        }
        if (other)
                sock_put(other);
index 03b66d154b2bfdc5ae63017532787f85350b56e8..3a3cb09eec1228f994fb23ec378879652dec9a6d 100644 (file)
@@ -1961,24 +1961,65 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
        return skb;
 }
 
+static int xfrm_notify_userpolicy(struct net *net)
+{
+       struct xfrm_userpolicy_default *up;
+       int len = NLMSG_ALIGN(sizeof(*up));
+       struct nlmsghdr *nlh;
+       struct sk_buff *skb;
+       int err;
+
+       skb = nlmsg_new(len, GFP_ATOMIC);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
+       if (nlh == NULL) {
+               kfree_skb(skb);
+               return -EMSGSIZE;
+       }
+
+       up = nlmsg_data(nlh);
+       up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+
+       nlmsg_end(skb, nlh);
+
+       rcu_read_lock();
+       err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
+       rcu_read_unlock();
+
+       return err;
+}
+
 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct nlattr **attrs)
 {
        struct net *net = sock_net(skb->sk);
        struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
-       u8 dirmask;
-       u8 old_default = net->xfrm.policy_default;
 
-       if (up->dirmask >= XFRM_USERPOLICY_DIRMASK_MAX)
-               return -EINVAL;
+       if (up->in == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
+       else if (up->in == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
 
-       dirmask = (1 << up->dirmask) & XFRM_POL_DEFAULT_MASK;
+       if (up->fwd == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
+       else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
 
-       net->xfrm.policy_default = (old_default & (0xff ^ dirmask))
-                                   | (up->action << up->dirmask);
+       if (up->out == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
+       else if (up->out == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
 
        rt_genid_bump_all(net);
 
+       xfrm_notify_userpolicy(net);
        return 0;
 }
 
@@ -1988,13 +2029,11 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct sk_buff *r_skb;
        struct nlmsghdr *r_nlh;
        struct net *net = sock_net(skb->sk);
-       struct xfrm_userpolicy_default *r_up, *up;
+       struct xfrm_userpolicy_default *r_up;
        int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
        u32 portid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
-       up = nlmsg_data(nlh);
-
        r_skb = nlmsg_new(len, GFP_ATOMIC);
        if (!r_skb)
                return -ENOMEM;
@@ -2007,8 +2046,12 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        r_up = nlmsg_data(r_nlh);
 
-       r_up->action = ((net->xfrm.policy_default & (1 << up->dirmask)) >> up->dirmask);
-       r_up->dirmask = up->dirmask;
+       r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
        nlmsg_end(r_skb, r_nlh);
 
        return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
index 4dc20be5fb96a63ccf01b19550518a51ba878b3b..5fd48a8d4f10a550278bb35483a73e9feff9a93e 100644 (file)
@@ -322,17 +322,11 @@ $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
 
 -include $(BPF_SAMPLES_PATH)/Makefile.target
 
-VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                           \
-                    $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)    \
-                    ../../../../vmlinux                                \
-                    /sys/kernel/btf/vmlinux                            \
-                    /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux))                                \
+                    $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \
+                    $(abspath ./vmlinux)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
-ifeq ($(VMLINUX_BTF),)
-$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
-endif
-
 $(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
 ifeq ($(VMLINUX_H),)
        $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
@@ -340,6 +334,11 @@ else
        $(Q)cp "$(VMLINUX_H)" $@
 endif
 
+ifeq ($(VMLINUX_BTF),)
+       $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)",\
+               build the kernel or set VMLINUX_BTF variable)
+endif
+
 clean-files += vmlinux.h
 
 # Get Clang's default includes on this system, as opposed to those seen by
index aee04534483a8ca17e1af2d23fc2ebb850cf193a..29c3bb6ad1cd8a82983bfd634119eca30853f0c2 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
 /* eBPF instruction mini library */
 #ifndef __BPF_INSN_H
 #define __BPF_INSN_H
index 8f59d430cb64c8ca0bcd5acfc84274d4bb8f77c4..bb0a5a3bfcf01961cdb6db2bc1e472a5c15247b1 100644 (file)
@@ -5,11 +5,6 @@
 #include "xdp_sample.bpf.h"
 #include "xdp_sample_shared.h"
 
-enum {
-       BPF_F_BROADCAST         = (1ULL << 3),
-       BPF_F_EXCLUDE_INGRESS   = (1ULL << 4),
-};
-
 struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(key_size, sizeof(int));
index 952e46876329a78a104ee91ef539c7e63d77c68c..4aad28480035582f0c4b99a973826597774d054e 100644 (file)
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)              \
                += -fplugin-arg-structleak_plugin-byref
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)    \
                += -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+    DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK)              \
                += -DSTRUCTLEAK_PLUGIN
 
index fd9777f63f14703d25664d5dd601e85bf635b0bb..9dbab13329fa986f0a2d6e955b8b5e1c105634f3 100755 (executable)
@@ -82,10 +82,8 @@ cat << EOF
 #define __IGNORE_truncate64
 #define __IGNORE_stat64
 #define __IGNORE_lstat64
-#define __IGNORE_fstat64
 #define __IGNORE_fcntl64
 #define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
 #define __IGNORE_fstatfs64
 #define __IGNORE_statfs64
 #define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
 #define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
 EOF
 }
 
index 8f6b13ae46bfc4855f0a31375a4ce7d42262b53c..7d631aaa0ae118bc41e9649d811beaf4ee4fe6ec 100755 (executable)
@@ -189,7 +189,7 @@ if ($arch =~ /(x86(_64)?)|(i386)/) {
 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
 $section_regex = "Disassembly of section\\s+(\\S+):";
-$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$function_regex = "^([0-9a-fA-F]+)\\s+<([^^]*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
 $section_type = '@progbits';
 $mcount_adjust = 0;
index e3d79a7b6db661113eac26533d1182d8da5bf3fd..b5d5333ab3300e86862515082946552fcf4e6e1d 100644 (file)
@@ -918,6 +918,13 @@ void key_change_session_keyring(struct callback_head *twork)
                return;
        }
 
+       /* If get_ucounts fails more bits are needed in the refcount */
+       if (unlikely(!get_ucounts(old->ucounts))) {
+               WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
+               put_cred(new);
+               return;
+       }
+
        new->  uid      = old->  uid;
        new-> euid      = old-> euid;
        new-> suid      = old-> suid;
@@ -927,6 +934,7 @@ void key_change_session_keyring(struct callback_head *twork)
        new-> sgid      = old-> sgid;
        new->fsgid      = old->fsgid;
        new->user       = get_uid(old->user);
+       new->ucounts    = old->ucounts;
        new->user_ns    = get_user_ns(old->user_ns);
        new->group_info = get_group_info(old->group_info);
 
index d59276f48d4fc12a46b93c535c014336695fb6e3..94ea2a8b2bb7366b0ab153ca21cc225ee655a78c 100644 (file)
@@ -126,6 +126,8 @@ static const struct nlmsg_perm nlmsg_xfrm_perms[] =
        { XFRM_MSG_NEWSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_WRITE },
        { XFRM_MSG_GETSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
        { XFRM_MSG_MAPPING,     NETLINK_XFRM_SOCKET__NLMSG_READ  },
+       { XFRM_MSG_SETDEFAULT,  NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+       { XFRM_MSG_GETDEFAULT,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
 };
 
 static const struct nlmsg_perm nlmsg_audit_perms[] =
@@ -189,7 +191,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
                 * structures at the top of this file with the new mappings
                 * before updating the BUILD_BUG_ON() macro!
                 */
-               BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
+               BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_GETDEFAULT);
                err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
                                 sizeof(nlmsg_xfrm_perms));
                break;
index a59de24695ec9b0ea7d20dc309bdf05ef7574286..dfe5a64e19d2e0c6eed5d3d8e3b4cf73c1415d3d 100644 (file)
@@ -468,6 +468,76 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
 }
 #endif /* CONFIG_X86_X32 */
 
+#ifdef __BIG_ENDIAN
+typedef char __pad_before_u32[4];
+typedef char __pad_after_u32[0];
+#else
+typedef char __pad_before_u32[0];
+typedef char __pad_after_u32[4];
+#endif
+
+/* PCM 2.0.15 API definition had a bug in mmap control; it puts the avail_min
+ * at the wrong offset due to a typo in padding type.
+ * The bug hits only 32bit.
+ * A workaround for incorrect read/write is needed only in 32bit compat mode.
+ */
+struct __snd_pcm_mmap_control64_buggy {
+       __pad_before_u32 __pad1;
+       __u32 appl_ptr;
+       __pad_before_u32 __pad2;        /* SiC! here is the bug */
+       __pad_before_u32 __pad3;
+       __u32 avail_min;
+       __pad_after_uframe __pad4;
+};
+
+static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
+                                       struct snd_pcm_sync_ptr __user *_sync_ptr)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_sync_ptr sync_ptr;
+       struct __snd_pcm_mmap_control64_buggy *sync_cp;
+       volatile struct snd_pcm_mmap_status *status;
+       volatile struct snd_pcm_mmap_control *control;
+       int err;
+
+       memset(&sync_ptr, 0, sizeof(sync_ptr));
+       sync_cp = (struct __snd_pcm_mmap_control64_buggy *)&sync_ptr.c.control;
+       if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
+               return -EFAULT;
+       if (copy_from_user(sync_cp, &(_sync_ptr->c.control), sizeof(*sync_cp)))
+               return -EFAULT;
+       status = runtime->status;
+       control = runtime->control;
+       if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+               err = snd_pcm_hwsync(substream);
+               if (err < 0)
+                       return err;
+       }
+       snd_pcm_stream_lock_irq(substream);
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+               err = pcm_lib_apply_appl_ptr(substream, sync_cp->appl_ptr);
+               if (err < 0) {
+                       snd_pcm_stream_unlock_irq(substream);
+                       return err;
+               }
+       } else {
+               sync_cp->appl_ptr = control->appl_ptr;
+       }
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+               control->avail_min = sync_cp->avail_min;
+       else
+               sync_cp->avail_min = control->avail_min;
+       sync_ptr.s.status.state = status->state;
+       sync_ptr.s.status.hw_ptr = status->hw_ptr;
+       sync_ptr.s.status.tstamp = status->tstamp;
+       sync_ptr.s.status.suspended_state = status->suspended_state;
+       sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+       snd_pcm_stream_unlock_irq(substream);
+       if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+               return -EFAULT;
+       return 0;
+}
+
 /*
  */
 enum {
@@ -537,7 +607,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
                if (in_x32_syscall())
                        return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
 #endif /* CONFIG_X86_X32 */
-               return snd_pcm_common_ioctl(file, substream, cmd, argp);
+               return snd_pcm_ioctl_sync_ptr_buggy(substream, argp);
        case SNDRV_PCM_IOCTL_HW_REFINE32:
                return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
        case SNDRV_PCM_IOCTL_HW_PARAMS32:
index 6c0a4a67ad2e39082a12090b7c22acc0f185d936..6f30231bdb88454c0d6c279f557965c1083608c4 100644 (file)
@@ -873,12 +873,21 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
                        return -EINVAL;
                }
        }
+       case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:
+               if (get_user(rfile->user_pversion, (unsigned int __user *)arg))
+                       return -EFAULT;
+               return 0;
+
        case SNDRV_RAWMIDI_IOCTL_PARAMS:
        {
                struct snd_rawmidi_params params;
 
                if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
                        return -EFAULT;
+               if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {
+                       params.mode = 0;
+                       memset(params.reserved, 0, sizeof(params.reserved));
+               }
                switch (params.stream) {
                case SNDRV_RAWMIDI_STREAM_OUTPUT:
                        if (rfile->output == NULL)
index 382275c5b1937e14e8b8d65187948eb847606a63..7f3fd8eb016fe825e13394576f803f0f27603fe0 100644 (file)
@@ -156,6 +156,8 @@ static int snd_seq_device_dev_free(struct snd_device *device)
        struct snd_seq_device *dev = device->device_data;
 
        cancel_autoload_drivers();
+       if (dev->private_free)
+               dev->private_free(dev);
        put_device(&dev->dev);
        return 0;
 }
@@ -183,11 +185,7 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device)
 
 static void snd_seq_dev_release(struct device *dev)
 {
-       struct snd_seq_device *sdev = to_seq_dev(dev);
-
-       if (sdev->private_free)
-               sdev->private_free(sdev);
-       kfree(sdev);
+       kfree(to_seq_dev(dev));
 }
 
 /*
index ed40d0f7432c8c771c90116aee0761a56c0f21f5..773db4bf087693d995c19df9b98d0a889e84f538 100644 (file)
@@ -143,7 +143,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
        if (pointer_update)
                pcsp_pointer_update(chip);
 
-       hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+       hrtimer_forward_now(handle, ns_to_ktime(ns));
 
        return HRTIMER_RESTART;
 }
index 5388b85fb60e5a8e887380108031f4f0774d8ba6..a18c2c033e8362e4297183a7be28598ac98143ac 100644 (file)
@@ -276,10 +276,11 @@ static void __maybe_unused copy_message(u64 *frames, __be32 *buffer,
 
        /* This is just for v2/v3 protocol. */
        for (i = 0; i < data_blocks; ++i) {
-               *frames = (be32_to_cpu(buffer[1]) << 16) |
-                         (be32_to_cpu(buffer[2]) >> 16);
+               *frames = be32_to_cpu(buffer[1]);
+               *frames <<= 16;
+               *frames |= be32_to_cpu(buffer[2]) >> 16;
+               ++frames;
                buffer += data_block_quadlets;
-               frames++;
        }
 }
 
index cb5b5e3a481b9ba3149fdbee7fb896c4bf7f7d46..daf731364695b1531c29258e8775fc54dbed78b6 100644 (file)
@@ -184,13 +184,16 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
                        model = val;
        }
 
-       /*
-        * Mackie Onyx Satellite with base station has a quirk to report a wrong
-        * value in 'dbs' field of CIP header against its format information.
-        */
-       if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
+       if (vendor == VENDOR_LOUD) {
+               // Mackie Onyx Satellite with base station has a quirk to report a wrong
+               // value in 'dbs' field of CIP header against its format information.
                oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
 
+               // OXFW971-based models may transfer events by blocking method.
+               if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))
+                       oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+       }
+
        return 0;
 }
 
index 062da7a7a5861f994da1236a62b6138b158eb514..f7bd6e2db085bb666bcba9618cfdeb15541455e8 100644 (file)
@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
        if (!full_reset)
                goto skip_reset;
 
-       /* clear STATESTS */
-       snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
+       /* clear STATESTS if not in reset */
+       if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
+               snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
 
        /* reset controller */
        snd_hdac_bus_enter_link_reset(bus);
index 2523b23389e97dba45ff600b88535c47c6b13d56..1c8bffc3eec6e674b36065bf447400db71aa0835 100644 (file)
@@ -298,29 +298,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
 {
        int err;
 
+       if (codec->configured)
+               return 0;
+
        if (is_generic_config(codec))
                codec->probe_id = HDA_CODEC_ID_GENERIC;
        else
                codec->probe_id = 0;
 
-       err = snd_hdac_device_register(&codec->core);
-       if (err < 0)
-               return err;
+       if (!device_is_registered(&codec->core.dev)) {
+               err = snd_hdac_device_register(&codec->core);
+               if (err < 0)
+                       return err;
+       }
 
        if (!codec->preset)
                codec_bind_module(codec);
        if (!codec->preset) {
                err = codec_bind_generic(codec);
                if (err < 0) {
-                       codec_err(codec, "Unable to bind the codec\n");
-                       goto error;
+                       codec_dbg(codec, "Unable to bind the codec\n");
+                       return err;
                }
        }
 
+       codec->configured = 1;
        return 0;
-
- error:
-       snd_hdac_device_unregister(&codec->core);
-       return err;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
index a9ebefd60cf68a0e773af8fdad276226566456ea..0c4a337c9fc0d266db5afd4d37210a33c889e61e 100644 (file)
@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
        snd_array_free(&codec->nids);
        remove_conn_list(codec);
        snd_hdac_regmap_exit(&codec->core);
+       codec->configured = 0;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
 
index 7cd452831fd30956fcf6fff20e989f684b94fe95..930ae4002a8187addcc97defc7b75573c804fd88 100644 (file)
@@ -25,6 +25,7 @@
 #include <sound/core.h>
 #include <sound/initval.h>
 #include "hda_controller.h"
+#include "hda_local.h"
 
 #define CREATE_TRACE_POINTS
 #include "hda_controller_trace.h"
@@ -1248,17 +1249,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 int azx_codec_configure(struct azx *chip)
 {
        struct hda_codec *codec, *next;
+       int success = 0;
 
-       /* use _safe version here since snd_hda_codec_configure() deregisters
-        * the device upon error and deletes itself from the bus list.
-        */
-       list_for_each_codec_safe(codec, next, &chip->bus) {
-               snd_hda_codec_configure(codec);
+       list_for_each_codec(codec, &chip->bus) {
+               if (!snd_hda_codec_configure(codec))
+                       success++;
        }
 
-       if (!azx_bus(chip)->num_codecs)
-               return -ENODEV;
-       return 0;
+       if (success) {
+               /* unregister failed codecs if any codec has been probed */
+               list_for_each_codec_safe(codec, next, &chip->bus) {
+                       if (!codec->configured) {
+                               codec_err(codec, "Unable to configure, disabling\n");
+                               snd_hdac_device_unregister(&codec->core);
+                       }
+               }
+       }
+
+       return success ? 0 : -ENODEV;
 }
 EXPORT_SYMBOL_GPL(azx_codec_configure);
 
index 3062f87380b1b41f5f273f4c26b2ceda0aed3575..f5bf295eb83078d2586a5985aaa1af4eb2840116 100644 (file)
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_RETRY_PROBE  (1 << 27)       /* retry probe if no codec is configured */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use separate stream tag */
index 3aa432d814a24a1e1affe311054005e5cb057006..4d22e7adeee8ecf11006c951f18ed10cf8e220f9 100644 (file)
@@ -307,7 +307,8 @@ enum {
 /* quirks for AMD SB */
 #define AZX_DCAPS_PRESET_AMD_SB \
        (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
-        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
+        AZX_DCAPS_RETRY_PROBE)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -883,10 +884,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
        return azx_get_pos_posbuf(chip, azx_dev);
 }
 
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
 {
        azx_stop_chip(chip);
-       azx_enter_link_reset(chip);
+       if (!skip_link_reset)
+               azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
        display_power(chip, false);
 }
@@ -895,6 +897,11 @@ static void azx_shutdown_chip(struct azx *chip)
 static DEFINE_MUTEX(card_list_lock);
 static LIST_HEAD(card_list);
 
+static void azx_shutdown_chip(struct azx *chip)
+{
+       __azx_shutdown_chip(chip, false);
+}
+
 static void azx_add_card_list(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1717,7 +1724,7 @@ static void azx_check_snoop_available(struct azx *chip)
 
 static void azx_probe_work(struct work_struct *work)
 {
-       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
+       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
        azx_probe_continue(&hda->chip);
 }
 
@@ -1822,7 +1829,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        }
 
        /* continue probing in work context as may trigger request module */
-       INIT_WORK(&hda->probe_work, azx_probe_work);
+       INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
 
        *rchip = chip;
 
@@ -2136,7 +2143,7 @@ static int azx_probe(struct pci_dev *pci,
 #endif
 
        if (schedule_probe)
-               schedule_work(&hda->probe_work);
+               schedule_delayed_work(&hda->probe_work, 0);
 
        dev++;
        if (chip->disabled)
@@ -2222,6 +2229,11 @@ static int azx_probe_continue(struct azx *chip)
        int dev = chip->dev_index;
        int err;
 
+       if (chip->disabled || hda->init_failed)
+               return -EIO;
+       if (hda->probe_retry)
+               goto probe_retry;
+
        to_hda_bus(bus)->bus_probing = 1;
        hda->probe_continued = 1;
 
@@ -2283,10 +2295,20 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 #endif
+
+ probe_retry:
        if (bus->codec_mask && !(probe_only[dev] & 1)) {
                err = azx_codec_configure(chip);
-               if (err < 0)
+               if (err) {
+                       if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
+                           ++hda->probe_retry < 60) {
+                               schedule_delayed_work(&hda->probe_work,
+                                                     msecs_to_jiffies(1000));
+                               return 0; /* keep things up */
+                       }
+                       dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
                        goto out_free;
+               }
        }
 
        err = snd_card_register(chip->card);
@@ -2316,6 +2338,7 @@ out_free:
                display_power(chip, false);
        complete_all(&hda->probe_wait);
        to_hda_bus(bus)->bus_probing = 0;
+       hda->probe_retry = 0;
        return 0;
 }
 
@@ -2341,7 +2364,7 @@ static void azx_remove(struct pci_dev *pci)
                 * device during cancel_work_sync() call.
                 */
                device_unlock(&pci->dev);
-               cancel_work_sync(&hda->probe_work);
+               cancel_delayed_work_sync(&hda->probe_work);
                device_lock(&pci->dev);
 
                snd_card_free(card);
@@ -2357,7 +2380,7 @@ static void azx_shutdown(struct pci_dev *pci)
                return;
        chip = card->private_data;
        if (chip && chip->running)
-               azx_shutdown_chip(chip);
+               __azx_shutdown_chip(chip, true);
 }
 
 /* PCI IDs */
index 3fb119f090408865e2a380292807469b9bd8d369..0f39418f9328b08e7c34c2db540eed30f734100c 100644 (file)
@@ -14,7 +14,7 @@ struct hda_intel {
 
        /* sync probing */
        struct completion probe_wait;
-       struct work_struct probe_work;
+       struct delayed_work probe_work;
 
        /* card list (for power_save trigger) */
        struct list_head list;
@@ -30,6 +30,8 @@ struct hda_intel {
        unsigned int freed:1; /* resources already released */
 
        bool need_i915_power:1; /* the hda controller needs i915 power */
+
+       int probe_retry;        /* being probe-retry */
 };
 
 #endif
index 3c7ef55d016e9b65bc976dcd12c6c591f22496b8..31ff11ab868e1ac3c1aff15d20a52728884e2788 100644 (file)
@@ -1207,6 +1207,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                snd_hda_jack_add_kctl(codec, DOLPHIN_LO_PIN_NID, "Line Out", true,
                                      SND_JACK_HEADPHONE, NULL);
 
+               snd_hda_jack_add_kctl(codec, DOLPHIN_AMIC_PIN_NID, "Microphone", true,
+                                     SND_JACK_MICROPHONE, NULL);
+
                cs8409_fix_caps(codec, DOLPHIN_HP_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
index 8b7a389b6aedb830639df9d3b4604f2493eda5aa..965b096f416f69bd3b7d3d0e985c84b3f22656ee 100644 (file)
@@ -526,6 +526,8 @@ static void alc_shutup_pins(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0236:
+       case 0x10ec0256:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
@@ -2533,11 +2535,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
-       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
@@ -3528,7 +3532,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (codec->core.vendor_id != 0x10ec0257)
+       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
+           spec->codec_variant != ALC269_TYPE_ALC256)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -6401,6 +6406,44 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
        }
 }
 
+/* GPIO1 = amplifier on/off
+ * GPIO3 = mic mute LED
+ */
+static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
+                                         const struct hda_fixup *fix, int action)
+{
+       static const hda_nid_t conn[] = { 0x02 };
+
+       struct alc_spec *spec = codec->spec;
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x14, 0x90170110 },  /* front/high speakers */
+               { 0x17, 0x90170130 },  /* back/bass speakers */
+               { }
+       };
+
+       //enable micmute led
+       alc_fixup_hp_gpio_led(codec, action, 0x00, 0x04);
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->micmute_led_polarity = 1;
+               /* needed for amp of back speakers */
+               spec->gpio_mask |= 0x01;
+               spec->gpio_dir |= 0x01;
+               snd_hda_apply_pincfgs(codec, pincfgs);
+               /* share DAC to have unified volume control */
+               snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
+               snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               /* need to toggle GPIO to enable the amp of back speakers */
+               alc_update_gpio_data(codec, 0x01, true);
+               msleep(100);
+               alc_update_gpio_data(codec, 0x01, false);
+               break;
+       }
+}
+
 static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
                                          const struct hda_fixup *fix, int action)
 {
@@ -6429,12 +6472,44 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
        hda_fixup_thinkpad_acpi(codec, fix, action);
 }
 
+/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
+static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
+                                                 const struct hda_fixup *fix,
+                                                 int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->gen.suppress_auto_mute = 1;
+               break;
+       }
+}
+
 /* for alc295_fixup_hp_top_speakers */
 #include "hp_x360_helper.c"
 
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
+static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+                                                           const struct hda_fixup *fix,
+                                                           int action)
+{
+       /*
+       * A certain other OS sets these coeffs to different values. On at least one TongFang
+       * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+       * values are explicitly reset to default here. Without this, the external microphone is
+       * always in a plugged-in state, while the internal microphone is always in an unplugged
+       * state, breaking the ability to use the internal microphone.
+       */
+       alc_write_coef_idx(codec, 0x24, 0x0000);
+       alc_write_coef_idx(codec, 0x26, 0x0000);
+       alc_write_coef_idx(codec, 0x29, 0x3000);
+       alc_write_coef_idx(codec, 0x37, 0xfe05);
+       alc_write_coef_idx(codec, 0x45, 0x5089);
+}
+
 enum {
        ALC269_FIXUP_GPIO2,
        ALC269_FIXUP_SONY_VAIO,
@@ -6521,6 +6596,7 @@ enum {
        ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
        ALC280_FIXUP_HP_9480M,
        ALC245_FIXUP_HP_X360_AMP,
+       ALC285_FIXUP_HP_SPECTRE_X360_EB1,
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC288_FIXUP_DELL_XPS_13,
@@ -6646,6 +6722,11 @@ enum {
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
        ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
        ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
+       ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
+       ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+       ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+       ALC287_FIXUP_13S_GEN2_SPEAKERS,
+       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8209,6 +8290,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_spectre_x360,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360_EB1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_spectre_x360_eb1
+       },
        [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_ideapad_s740_coef,
@@ -8236,6 +8321,117 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
        },
+       [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               //.v.verbs = legion_15imhg05_coefs,
+               .v.verbs = (const struct hda_verb[]) {
+                        // set left speaker Legion 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        // set right speaker Legion 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                        {}
+               },
+               .chained = true,
+               .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+       },
+       [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc287_fixup_legion_15imhg05_speakers,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                        // set left speaker Yoga 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        // set right speaker Yoga 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                        {}
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8327,6 +8523,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8429,6 +8628,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+       SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@@ -8630,6 +8831,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
        SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+       SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8660,6 +8865,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -8845,6 +9051,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
        {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
        {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+       {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
        {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
        {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
        {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
@@ -10037,6 +10244,9 @@ enum {
        ALC671_FIXUP_HP_HEADSET_MIC2,
        ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
        ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
+       ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+       ALC668_FIXUP_HEADSET_MIC,
+       ALC668_FIXUP_MIC_DET_COEF,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10420,6 +10630,29 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC662_FIXUP_USI_FUNC
        },
+       [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x04a1112c },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC668_FIXUP_HEADSET_MIC
+       },
+       [ALC668_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_headset_mic,
+               .chained = true,
+               .chain_id = ALC668_FIXUP_MIC_DET_COEF
+       },
+       [ALC668_FIXUP_MIC_DET_COEF] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 },
+                       {}
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10455,6 +10688,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
index 87d24224c042e22ef3b4dcc466042c999e9992eb..23f253effb4faa928af41c545a2d26e53f5ea427 100644 (file)
@@ -52,7 +52,7 @@
 #define PCXHR_DSP 2
 
 #if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN)
-#undef  PCXHR_REG_TO_PORT(x)
+#error  PCXHR_REG_TO_PORT(x)
 #else
 #define PCXHR_REG_TO_PORT(x)   ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP)
 #endif
index 82ee233a269d07330dca4c2808d644991f910e58..216cea04ad704cdeecb941c8b1e2ab9cdfce9132 100644 (file)
@@ -1583,6 +1583,7 @@ config SND_SOC_WCD938X_SDW
        tristate "WCD9380/WCD9385 Codec - SDW"
        select SND_SOC_WCD938X
        select SND_SOC_WCD_MBHC
+       select REGMAP_IRQ
        depends on SOUNDWIRE
        select REGMAP_SOUNDWIRE
        help
index fb1e4c33e27d34e9843f9edf79e0fa3a82159bd9..9a463ab54bddc6d74ca5cc6b4a3ab2963e11e069 100644 (file)
@@ -922,7 +922,6 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
        struct snd_soc_component *component = dai->component;
        struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
        unsigned int regval;
-       u8 fullScaleVol;
        int ret;
 
        if (mute) {
@@ -993,20 +992,11 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                cs42l42->stream_use |= 1 << stream;
 
                if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-                       /* Read the headphone load */
-                       regval = snd_soc_component_read(component, CS42L42_LOAD_DET_RCSTAT);
-                       if (((regval & CS42L42_RLA_STAT_MASK) >> CS42L42_RLA_STAT_SHIFT) ==
-                           CS42L42_RLA_STAT_15_OHM) {
-                               fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
-                       } else {
-                               fullScaleVol = 0;
-                       }
-
-                       /* Un-mute the headphone, set the full scale volume flag */
+                       /* Un-mute the headphone */
                        snd_soc_component_update_bits(component, CS42L42_HP_CTL,
                                                      CS42L42_HP_ANA_AMUTE_MASK |
-                                                     CS42L42_HP_ANA_BMUTE_MASK |
-                                                     CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
+                                                     CS42L42_HP_ANA_BMUTE_MASK,
+                                                     0);
                }
        }
 
index 7d3e54d8eef36f46f15b3b3d92c1469b70377416..29d05e32d3417f0879036bac718354a1e33010ff 100644 (file)
@@ -305,12 +305,19 @@ static int cs4341_spi_probe(struct spi_device *spi)
        return cs4341_probe(&spi->dev);
 }
 
+static const struct spi_device_id cs4341_spi_ids[] = {
+       { "cs4341a" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, cs4341_spi_ids);
+
 static struct spi_driver cs4341_spi_driver = {
        .driver = {
                .name = "cs4341-spi",
                .of_match_table = of_match_ptr(cs4341_dt_ids),
        },
        .probe = cs4341_spi_probe,
+       .id_table = cs4341_spi_ids,
 };
 #endif
 
index db88be48c9980196550bb654785d175aca62e70e..f946ef65a4c19a9f90106b14804459ed16c62e61 100644 (file)
@@ -867,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       snd_soc_dapm_enable_pin(dapm, "MICBIAS");
-       snd_soc_dapm_enable_pin(dapm, "SAR");
+       snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+       snd_soc_dapm_force_enable_pin(dapm, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
index 0a542924ec5f969a94d236e2414e3684f7a744c7..ebf63ea90a1c4c7097f37383f0654b92e75fcc9e 100644 (file)
@@ -36,6 +36,7 @@ static const struct of_device_id pcm179x_of_match[] = {
 MODULE_DEVICE_TABLE(of, pcm179x_of_match);
 
 static const struct spi_device_id pcm179x_spi_ids[] = {
+       { "pcm1792a", 0 },
        { "pcm179x", 0 },
        { },
 };
index 4dc844f3c1fc0a95ac0f734939eb022488a0f847..60dee41816dc29369a371a8afc09a49821993244 100644 (file)
@@ -116,6 +116,8 @@ static const struct reg_default pcm512x_reg_defaults[] = {
        { PCM512x_FS_SPEED_MODE,     0x00 },
        { PCM512x_IDAC_1,            0x01 },
        { PCM512x_IDAC_2,            0x00 },
+       { PCM512x_I2S_1,             0x02 },
+       { PCM512x_I2S_2,             0x00 },
 };
 
 static bool pcm512x_readable(struct device *dev, unsigned int reg)
index f0daf8defcf1edd84b36ea037fe02a1ad7c19c98..52de7d14b13985970d44bb3f36203bfc9545d600 100644 (file)
@@ -4144,10 +4144,10 @@ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
 {
        struct wcd938x_priv *wcd = dev_get_drvdata(comp->dev);
 
-       if (!jack)
+       if (jack)
                return wcd_mbhc_start(wcd->wcd_mbhc, &wcd->mbhc_cfg, jack);
-
-       wcd_mbhc_stop(wcd->wcd_mbhc);
+       else
+               wcd_mbhc_stop(wcd->wcd_mbhc);
 
        return 0;
 }
index 9e621a254392c77f6747b2a947b5f94505eb3b86..499604f1e178951026f30b3164f335b27a843323 100644 (file)
@@ -742,9 +742,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
        int i, j, k;
        int ret;
 
-       if (!(iface1 & (1<<6))) {
-               dev_dbg(component->dev,
-                       "Codec is slave mode, no need to configure clock\n");
+       /*
+        * For Slave mode clocking should still be configured,
+        * so this if statement should be removed, but some platform
+        * may not work if the sysclk is not configured, to avoid such
+        * compatible issue, just add '!wm8960->sysclk' condition in
+        * this if statement.
+        */
+       if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
+               dev_warn(component->dev,
+                        "slave mode, but proceeding with no clock configuration\n");
                return 0;
        }
 
index a961f837cd094e0b6b963cb4665e8a2ea3b1c40d..bda66b30e063c24f4eb982288cdb5654dc701985 100644 (file)
@@ -1073,6 +1073,16 @@ static int fsl_esai_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_pm_get_sync;
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+               goto err_pm_get_sync;
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                              &fsl_esai_dai, 1);
        if (ret) {
@@ -1082,12 +1092,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
 
        INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
 
-       ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
-               goto err_pm_get_sync;
-       }
-
        return ret;
 
 err_pm_get_sync:
index 8c0c75ce9490faf79c8b43b961b6907f976ab911..9f90989ac59a6f48b38d6e3d4ab2d1f3e63ce92b 100644 (file)
@@ -737,18 +737,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        regcache_cache_only(micfil->regmap, true);
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to pcm register\n");
+               return ret;
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
                                              &fsl_micfil_dai, 1);
        if (ret) {
                dev_err(&pdev->dev, "failed to register component %s\n",
                        fsl_micfil_component.name);
-               return ret;
        }
 
-       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-       if (ret)
-               dev_err(&pdev->dev, "failed to pcm register\n");
-
        return ret;
 }
 
index 223fcd15bfcccf44ebb680b3fd3b33902693328b..38f6362099d587ab97d78aeb1ec79913917d411f 100644 (file)
@@ -1152,11 +1152,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_pm_get_sync;
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
-                                             &sai->cpu_dai_drv, 1);
-       if (ret)
-               goto err_pm_get_sync;
-
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
        if (sai->soc_data->use_imx_pcm) {
                ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
                if (ret)
@@ -1167,6 +1166,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
                        goto err_pm_get_sync;
        }
 
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+                                             &sai->cpu_dai_drv, 1);
+       if (ret)
+               goto err_pm_get_sync;
+
        return ret;
 
 err_pm_get_sync:
index 8ffb1a6048d63f06f2c676c216069eef1e6eac06..1c53719bb61e234d5456ec4cc7d9be234bd86403 100644 (file)
@@ -1434,16 +1434,20 @@ static int fsl_spdif_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        regcache_cache_only(spdif_priv->regmap, true);
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
-                                             &spdif_priv->cpu_dai_drv, 1);
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+               dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
                goto err_pm_disable;
        }
 
-       ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+                                             &spdif_priv->cpu_dai_drv, 1);
        if (ret) {
-               dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
+               dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
                goto err_pm_disable;
        }
 
index 31c5ee641fe76268ce5e16d38d71d36372d024f0..d0556c79fdb1531aaa6f84d53c2e6fcc3da82c24 100644 (file)
@@ -487,8 +487,9 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       /* clear DPATH RESET */
+       /* set DPATH RESET */
        m_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
+       v_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
        ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, m_ctl, v_ctl);
        if (ret < 0) {
                dev_err(dai->dev, "Error while setting EXT_CTRL: %d\n", ret);
@@ -590,10 +591,6 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
                val  |= FSL_XCVR_EXT_CTRL_CMDC_RESET(tx);
        }
 
-       /* set DPATH RESET */
-       mask |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
-       val  |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
-
        ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, mask, val);
        if (ret < 0) {
                dev_err(dai->dev, "Err setting DPATH RESET: %d\n", ret);
@@ -643,6 +640,16 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
                        dev_err(dai->dev, "Failed to enable DMA: %d\n", ret);
                        return ret;
                }
+
+               /* clear DPATH RESET */
+               ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+                                        FSL_XCVR_EXT_CTRL_DPTH_RESET(tx),
+                                        0);
+               if (ret < 0) {
+                       dev_err(dai->dev, "Failed to clear DPATH RESET: %d\n", ret);
+                       return ret;
+               }
+
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
@@ -1215,18 +1222,23 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
        regcache_cache_only(xcvr->regmap, true);
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
+       if (ret) {
+               dev_err(dev, "failed to pcm register\n");
+               return ret;
+       }
+
        ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
                                              &fsl_xcvr_dai, 1);
        if (ret) {
                dev_err(dev, "failed to register component %s\n",
                        fsl_xcvr_comp.name);
-               return ret;
        }
 
-       ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
-       if (ret)
-               dev_err(dev, "failed to pcm register\n");
-
        return ret;
 }
 
index 055248f104b24537c68c88d636c4ff33f147846d..4d313d0d0f23e4c2fd019157595053b7f7daa299 100644 (file)
@@ -456,12 +456,12 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
 
 static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        static const char * const mic_name[] = { "in1", "in2" };
+       struct snd_soc_acpi_mach *mach = dev_get_platdata(dev);
        struct property_entry props[MAX_NO_PROPS] = {};
        struct byt_cht_es8316_private *priv;
        const struct dmi_system_id *dmi_id;
-       struct device *dev = &pdev->dev;
-       struct snd_soc_acpi_mach *mach;
        struct fwnode_handle *fwnode;
        const char *platform_name;
        struct acpi_device *adev;
@@ -476,7 +476,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       mach = dev->platform_data;
        /* fix index of codec dai */
        for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
                if (!strcmp(byt_cht_es8316_dais[i].codecs->name,
@@ -494,7 +493,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
                put_device(&adev->dev);
                byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
        } else {
-               dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
+               dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
                return -ENXIO;
        }
 
@@ -533,11 +532,8 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 
        /* get the clock */
        priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
-       if (IS_ERR(priv->mclk)) {
-               ret = PTR_ERR(priv->mclk);
-               dev_err(dev, "clk_get pmc_plt_clk_3 failed: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(priv->mclk))
+               return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");
 
        /* get speaker enable GPIO */
        codec_dev = acpi_get_first_physical_node(adev);
@@ -567,22 +563,13 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 
        devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
        priv->speaker_en_gpio =
-               gpiod_get_index(codec_dev, "speaker-enable", 0,
-                               /* see comment in byt_cht_es8316_resume */
-                               GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
-
+               gpiod_get_optional(codec_dev, "speaker-enable",
+                                  /* see comment in byt_cht_es8316_resume() */
+                                  GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(priv->speaker_en_gpio)) {
-               ret = PTR_ERR(priv->speaker_en_gpio);
-               switch (ret) {
-               case -ENOENT:
-                       priv->speaker_en_gpio = NULL;
-                       break;
-               default:
-                       dev_err(dev, "get speaker GPIO failed: %d\n", ret);
-                       fallthrough;
-               case -EPROBE_DEFER:
-                       goto err_put_codec;
-               }
+               ret = dev_err_probe(dev, PTR_ERR(priv->speaker_en_gpio),
+                                   "get speaker GPIO failed\n");
+               goto err_put_codec;
        }
 
        snprintf(components_string, sizeof(components_string),
@@ -597,7 +584,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
        byt_cht_es8316_card.long_name = long_name;
 #endif
 
-       sof_parent = snd_soc_acpi_sof_parent(&pdev->dev);
+       sof_parent = snd_soc_acpi_sof_parent(dev);
 
        /* set card and driver name */
        if (sof_parent) {
index 6602eda89e8efaf9c3ad2bea5f9e90c80a1ce514..6b06248a9327a51a4229b6b69771d70f47e0772e 100644 (file)
@@ -929,6 +929,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
                              cpus + *cpu_id, cpu_dai_num,
                              codecs, codec_num,
                              NULL, &sdw_ops);
+               /*
+                * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
+                * based on wait_for_completion(), tag them as 'nonatomic'.
+                */
+               dai_links[*be_index].nonatomic = true;
 
                ret = set_codec_init_func(card, link, dai_links + (*be_index)++,
                                          playback, group_id);
index 5a2f4667d50b398d76c138db91bb1debe566d6b8..81ad2dcee9ebc0e6025c7411d292216a5b9f956c 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config SND_SOC_MEDIATEK
        tristate
+       select REGMAP_MMIO
 
 config SND_SOC_MT2701
        tristate "ASoC support for Mediatek MT2701 chip"
@@ -188,7 +189,9 @@ config SND_SOC_MT8192_MT6359_RT1015_RT5682
 config SND_SOC_MT8195
        tristate "ASoC support for Mediatek MT8195 chip"
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on COMMON_CLK
        select SND_SOC_MEDIATEK
+       select MFD_SYSCON if SND_SOC_MT6359
        help
          This adds ASoC platform driver support for Mediatek MT8195 chip
          that can be used with other codecs.
index baaa5881b1d4849e789b2eff3c834a9b0a927f2c..e95c7c018e7d4c028f26755c7a950da9f160c638 100644 (file)
@@ -334,9 +334,11 @@ int mtk_afe_suspend(struct snd_soc_component *component)
                        devm_kcalloc(dev, afe->reg_back_up_list_num,
                                     sizeof(unsigned int), GFP_KERNEL);
 
-       for (i = 0; i < afe->reg_back_up_list_num; i++)
-               regmap_read(regmap, afe->reg_back_up_list[i],
-                           &afe->reg_back_up[i]);
+       if (afe->reg_back_up) {
+               for (i = 0; i < afe->reg_back_up_list_num; i++)
+                       regmap_read(regmap, afe->reg_back_up_list[i],
+                                   &afe->reg_back_up[i]);
+       }
 
        afe->suspended = true;
        afe->runtime_suspend(dev);
@@ -356,12 +358,13 @@ int mtk_afe_resume(struct snd_soc_component *component)
 
        afe->runtime_resume(dev);
 
-       if (!afe->reg_back_up)
+       if (!afe->reg_back_up) {
                dev_dbg(dev, "%s no reg_backup\n", __func__);
-
-       for (i = 0; i < afe->reg_back_up_list_num; i++)
-               mtk_regmap_write(regmap, afe->reg_back_up_list[i],
-                                afe->reg_back_up[i]);
+       } else {
+               for (i = 0; i < afe->reg_back_up_list_num; i++)
+                       mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+                                        afe->reg_back_up[i]);
+       }
 
        afe->suspended = false;
        return 0;
index c97ace7387b4cdce5f506f17789a8c67ecb6cede..de09f67c04502207a878f22d8348da9a1940877f 100644 (file)
@@ -424,8 +424,8 @@ static int mt8195_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
        return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
 }
 
-static int mt8195_hdmitx_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
-                                             struct snd_pcm_hw_params *params)
+static int mt8195_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+                                      struct snd_pcm_hw_params *params)
 
 {
        /* fix BE i2s format to 32bit, clean param mask first */
@@ -902,7 +902,7 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
                .no_pcm = 1,
                .dpcm_playback = 1,
                .ops = &mt8195_dptx_ops,
-               .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
+               .be_hw_params_fixup = mt8195_dptx_hw_params_fixup,
                SND_SOC_DAILINK_REG(DPTX_BE),
        },
        [DAI_LINK_ETDM1_IN_BE] = {
@@ -953,7 +953,6 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
                        SND_SOC_DAIFMT_NB_NF |
                        SND_SOC_DAIFMT_CBS_CFS,
                .dpcm_playback = 1,
-               .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
                SND_SOC_DAILINK_REG(ETDM3_OUT_BE),
        },
        [DAI_LINK_PCM1_BE] = {
index c830e96afba244a0f13fd4194446cadd477afb4f..80ca260595fda358a67e9960398a863e2533d52d 100644 (file)
@@ -2599,6 +2599,7 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
        INIT_LIST_HEAD(&component->dai_list);
        INIT_LIST_HEAD(&component->dobj_list);
        INIT_LIST_HEAD(&component->card_list);
+       INIT_LIST_HEAD(&component->list);
        mutex_init(&component->io_mutex);
 
        component->name = fmt_single_name(dev, &component->id);
index 7b67f1e19ae9525d9e4b7417d11cfe9452c13c7d..59d07648a7e7fb51750b131c0e8096a85705449d 100644 (file)
@@ -2561,6 +2561,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
                                const char *pin, int status)
 {
        struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+       int ret = 0;
 
        dapm_assert_locked(dapm);
 
@@ -2573,13 +2574,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
                dapm_mark_dirty(w, "pin configuration");
                dapm_widget_invalidate_input_paths(w);
                dapm_widget_invalidate_output_paths(w);
+               ret = 1;
        }
 
        w->connected = status;
        if (status == 0)
                w->force = 0;
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -3583,14 +3585,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
        const char *pin = (const char *)kcontrol->private_value;
+       int ret;
 
        if (ucontrol->value.integer.value[0])
-               snd_soc_dapm_enable_pin(&card->dapm, pin);
+               ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
        else
-               snd_soc_dapm_disable_pin(&card->dapm, pin);
+               ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
 
        snd_soc_dapm_sync(&card->dapm);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
 
@@ -4023,7 +4026,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
 
        rtd->params_select = ucontrol->value.enumerated.item[0];
 
-       return 0;
+       return 1;
 }
 
 static void
index 3e4dd4a86363b822e8b1037adcd3490a81caf57f..59d0d7b2b55c82efff7b77a1126b03bff363f4ed 100644 (file)
@@ -371,7 +371,6 @@ int snd_sof_device_remove(struct device *dev)
                        dev_warn(dev, "error: %d failed to prepare DSP for device removal",
                                 ret);
 
-               snd_sof_fw_unload(sdev);
                snd_sof_ipc_free(sdev);
                snd_sof_free_debug(sdev);
                snd_sof_free_trace(sdev);
@@ -394,8 +393,7 @@ int snd_sof_device_remove(struct device *dev)
                snd_sof_remove(sdev);
 
        /* release firmware */
-       release_firmware(pdata->fw);
-       pdata->fw = NULL;
+       snd_sof_fw_unload(sdev);
 
        return 0;
 }
index 12fedf0984bd90437142c1e959b9376ecf9411f7..7e9723a10d02e015ed6c356a78ba71973e7aa9c9 100644 (file)
@@ -365,7 +365,14 @@ static int imx8_remove(struct snd_sof_dev *sdev)
 /* on i.MX8 there is 1 to 1 match between type and BAR idx */
 static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
 {
-       return type;
+       /* Only IRAM and SRAM bars are valid */
+       switch (type) {
+       case SOF_FW_BLK_TYPE_IRAM:
+       case SOF_FW_BLK_TYPE_SRAM:
+               return type;
+       default:
+               return -EINVAL;
+       }
 }
 
 static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
index cb822d9537678d0e9e9972bd47375054cfdefaa0..892e1482f97faaf0cf4f407ee76f8b34372afdad 100644 (file)
@@ -228,7 +228,14 @@ static int imx8m_remove(struct snd_sof_dev *sdev)
 /* on i.MX8 there is 1 to 1 match between type and BAR idx */
 static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
 {
-       return type;
+       /* Only IRAM and SRAM bars are valid */
+       switch (type) {
+       case SOF_FW_BLK_TYPE_IRAM:
+       case SOF_FW_BLK_TYPE_SRAM:
+               return type;
+       default:
+               return -EINVAL;
+       }
 }
 
 static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
index 2b38a77cd594f56f658ad130f60170bd47c6f96b..bb79c77775b3df968402d348d1233659e0d74170 100644 (file)
@@ -729,10 +729,10 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
        ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
 
        if (ret < 0) {
-               dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
-                       fw_filename, ret);
                dev_err(sdev->dev,
-                       "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");
+                       "error: sof firmware file is missing, you might need to\n");
+               dev_err(sdev->dev,
+                       "       download it from https://github.com/thesofproject/sof-bin/\n");
                goto err;
        } else {
                dev_dbg(sdev->dev, "request_firmware %s successful\n",
@@ -880,5 +880,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
 void snd_sof_fw_unload(struct snd_sof_dev *sdev)
 {
        /* TODO: support module unloading at runtime */
+       release_firmware(sdev->pdata->fw);
+       sdev->pdata->fw = NULL;
 }
 EXPORT_SYMBOL(snd_sof_fw_unload);
index f72a6e83e6af20c2758cc4e382d1c95266e0c3e7..58f6ca5cf491a6cec698a4e0ac3b14d0740023b4 100644 (file)
@@ -530,7 +530,6 @@ void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
                return;
 
        if (sdev->dtrace_is_enabled) {
-               dev_err(sdev->dev, "error: waking up any trace sleepers\n");
                sdev->dtrace_error = true;
                wake_up(&sdev->trace_sleep);
        }
index bbb9a2282ed9ea4f10d0fcc028ae9447b6cced04..f6e3411b33cf125fb10da389dab4a00e795764a4 100644 (file)
@@ -122,9 +122,9 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
         * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
         */
        for (i = 0; i < stack_words; i += 4) {
-               hex_dump_to_buffer(stack + i * 4, 16, 16, 4,
+               hex_dump_to_buffer(stack + i, 16, 16, 4,
                                   buf, sizeof(buf), false);
-               dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);
+               dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
        }
 }
 
index fd570a42f043198f1b7068132334b44c40bc22b6..1764b9302d467956e401d405f30b7e593be5e2ff 100644 (file)
@@ -1054,7 +1054,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
        return 0;
 }
 
-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+static int usb_audio_resume(struct usb_interface *intf)
 {
        struct snd_usb_audio *chip = usb_get_intfdata(intf);
        struct snd_usb_stream *as;
@@ -1080,7 +1080,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
         * we just notify and restart the mixers
         */
        list_for_each_entry(mixer, &chip->mixer_list, list) {
-               err = snd_usb_mixer_resume(mixer, reset_resume);
+               err = snd_usb_mixer_resume(mixer);
                if (err < 0)
                        goto err_out;
        }
@@ -1100,20 +1100,10 @@ err_out:
        atomic_dec(&chip->active); /* allow autopm after this point */
        return err;
 }
-
-static int usb_audio_resume(struct usb_interface *intf)
-{
-       return __usb_audio_resume(intf, false);
-}
-
-static int usb_audio_reset_resume(struct usb_interface *intf)
-{
-       return __usb_audio_resume(intf, true);
-}
 #else
 #define usb_audio_suspend      NULL
 #define usb_audio_resume       NULL
-#define usb_audio_reset_resume NULL
+#define usb_audio_resume       NULL
 #endif         /* CONFIG_PM */
 
 static const struct usb_device_id usb_audio_ids [] = {
@@ -1135,7 +1125,7 @@ static struct usb_driver usb_audio_driver = {
        .disconnect =   usb_audio_disconnect,
        .suspend =      usb_audio_suspend,
        .resume =       usb_audio_resume,
-       .reset_resume = usb_audio_reset_resume,
+       .reset_resume = usb_audio_resume,
        .id_table =     usb_audio_ids,
        .supports_autosuspend = 1,
 };
index 43bc59575a6e360b0984a41792e5ab4018f5745e..8e030b1c061ab353ab0b1683fd37bf7f542666f8 100644 (file)
@@ -1198,6 +1198,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                        cval->res = 1;
                }
                break;
+       case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
+               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+                       usb_audio_info(chip,
+                               "set resolution quirk: cval->res = 16\n");
+                       cval->res = 16;
+               }
+               break;
        }
 }
 
@@ -3653,33 +3660,16 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
        return 0;
 }
 
-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
-{
-       int err;
-
-       if (list->resume) {
-               err = list->resume(list);
-               if (err < 0)
-                       return err;
-       }
-       return restore_mixer_value(list);
-}
-
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
 {
        struct usb_mixer_elem_list *list;
-       usb_mixer_elem_resume_func_t f;
        int id, err;
 
        /* restore cached mixer values */
        for (id = 0; id < MAX_ID_ELEMS; id++) {
                for_each_mixer_elem(list, mixer, id) {
-                       if (reset_resume)
-                               f = list->reset_resume;
-                       else
-                               f = list->resume;
-                       if (f) {
-                               err = f(list);
+                       if (list->resume) {
+                               err = list->resume(list);
                                if (err < 0)
                                        return err;
                        }
@@ -3700,7 +3690,6 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
        list->id = unitid;
        list->dump = snd_usb_mixer_dump_cval;
 #ifdef CONFIG_PM
-       list->resume = NULL;
-       list->reset_resume = default_mixer_reset_resume;
+       list->resume = restore_mixer_value;
 #endif
 }
index 876bbc9a71ad7812c14bb32529e3da7e14a3fecd..98ea24d91d803a527c9bd7981ab266edc40934aa 100644 (file)
@@ -70,7 +70,6 @@ struct usb_mixer_elem_list {
        bool is_std_info;
        usb_mixer_elem_dump_func_t dump;
        usb_mixer_elem_resume_func_t resume;
-       usb_mixer_elem_resume_func_t reset_resume;
 };
 
 /* iterate over mixer element list of the given unit id */
@@ -121,7 +120,7 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 
 #ifdef CONFIG_PM
 int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);
 #endif
 
 int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
index a66ce0375fd97ca2377e04150ce3b51fa899e7f3..46082dc57be09e7d4d3cf8969da314681a594a2f 100644 (file)
@@ -151,7 +151,7 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
                *listp = list;
        list->mixer = mixer;
        list->id = id;
-       list->reset_resume = resume;
+       list->resume = resume;
        kctl = snd_ctl_new1(knew, list);
        if (!kctl) {
                kfree(list);
index 3d5848d5481be93156792793f92575f3bba850c8..53ebabf42472224ee1dee0eeea5e8f7879c4739c 100644 (file)
@@ -2450,6 +2450,8 @@ static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
                err = scarlett2_usb_get_config(mixer,
                                               SCARLETT2_CONFIG_TALKBACK_MAP,
                                               1, &bitmap);
+               if (err < 0)
+                       return err;
                for (i = 0; i < num_mixes; i++, bitmap >>= 1)
                        private->talkback_map[i] = bitmap & 1;
        }
index e03043f7dad3fe00fa0a0576f0f32fc620127dfc..2af8c68fac275292da2d3af58245069954834c1f 100644 (file)
 /* E-Mu 0204 USB */
 { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
 
+/*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+ * The device advertises 8 formats, but only a rate of 48kHz is honored by the
+ * hardware and 24 bits give chopped audio, so only report the one working
+ * combination.
+ */
+{
+       USB_DEVICE(0x041e, 0x4095),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 16,
+                                       .iface = 3,
+                                       .altsetting = 4,
+                                       .altset_idx = 4,
+                                       .endpoint = 0x82,
+                                       .ep_attr = 0x05,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 48000 },
+                               },
+                       },
+                       {
+                               .ifnum = -1
+                       },
+               },
+       },
+},
+
 /*
  * HP Wireless Audio
  * When not ignored, causes instability issues for some users, forcing them to
@@ -3970,6 +4012,38 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /*
+        * Sennheiser GSP670
+        * Change order of interfaces loaded
+        */
+       USB_DEVICE(0x1395, 0x0300),
+       .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       // Communication
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       // Recording
+                       {
+                               .ifnum = 4,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       // Main
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 #undef USB_DEVICE_VENDOR_SPEC
 #undef USB_AUDIO_DEVICE
index 6ee6d24c847fd51e968b1960f4a0fcd9363fac7f..8929d9abe8aa8368f2649a9dd29e2177f9c95424 100644 (file)
@@ -1719,6 +1719,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
                 */
                fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
                break;
+       case USB_ID(0x1224, 0x2a25):  /* Jieli Technology USB PHY 2.0 */
+               /* mic works only when ep packet size is set to wMaxPacketSize */
+               fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
+               break;
+
        }
 }
 
@@ -1884,10 +1889,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+                  QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
                   QUIRK_FLAG_ALIGN_TRANSFER),
+       DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
 
        /* Vendor matches */
        VENDOR_FLG(0x045e, /* MS Lifecam */
@@ -1900,6 +1909,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY),
        VENDOR_FLG(0x07fd, /* MOTU */
                   QUIRK_FLAG_VALIDATE_RATES),
+       VENDOR_FLG(0x1235, /* Focusrite Novation */
+                  QUIRK_FLAG_VALIDATE_RATES),
        VENDOR_FLG(0x152a, /* Thesycon devices */
                   QUIRK_FLAG_DSD_RAW),
        VENDOR_FLG(0x1de7, /* Phoenix Audio */
index 1d84ec9db93bd34e77ab31643cd8b5534480f1a1..5859ca0a1439be4cc3276fd54d9cc20c3c28fa25 100644 (file)
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
 
 #define SNDRV_RAWMIDI_IOCTL_PVERSION   _IOR('W', 0x00, int)
 #define SNDRV_RAWMIDI_IOCTL_INFO       _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
 #define SNDRV_RAWMIDI_IOCTL_PARAMS     _IOWR('W', 0x10, struct snd_rawmidi_params)
 #define SNDRV_RAWMIDI_IOCTL_STATUS     _IOWR('W', 0x20, struct snd_rawmidi_status)
 #define SNDRV_RAWMIDI_IOCTL_DROP       _IOW('W', 0x30, int)
index b0bf56c5f12021c9e994f03aad9390e43484778a..5a5bd74f55bd5095b2936298e5f2da552a8c415a 100755 (executable)
@@ -742,7 +742,7 @@ class DebugfsProvider(Provider):
         The fields are all available KVM debugfs files
 
         """
-        exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns']
+        exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns', 'halt_wait_ns']
         fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2]
                   if field not in exempt_list]
 
index 88d8825fc6f61fb91a954656090d73288dac693a..e4f83c304ec9240580bbeda3b50df34e081b61c0 100644 (file)
@@ -6894,7 +6894,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 
        if (obj->gen_loader) {
                /* reset FDs */
-               btf__set_fd(obj->btf, -1);
+               if (obj->btf)
+                       btf__set_fd(obj->btf, -1);
                for (i = 0; i < obj->nr_maps; i++)
                        obj->maps[i].fd = -1;
                if (!err)
index 10911a8cad0f2b7e64abd5ed45f9954971004d99..2df880cefdaeea1ff93b00134a7fcc6971939656 100644 (file)
@@ -1649,11 +1649,17 @@ static bool btf_is_non_static(const struct btf_type *t)
 static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
                             int *out_btf_sec_id, int *out_btf_id)
 {
-       int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
+       int i, j, n, m, btf_id = 0;
        const struct btf_type *t;
        const struct btf_var_secinfo *vi;
        const char *name;
 
+       if (!obj->btf) {
+               pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
+               return -EINVAL;
+       }
+
+       n = btf__get_nr_types(obj->btf);
        for (i = 1; i <= n; i++) {
                t = btf__type_by_id(obj->btf, i);
 
index 1fb8b49de1d628b2ac6749754114cfee0576c468..ea655318153f26b926c4e1ee7a6e2e9f55e35eee 100644 (file)
@@ -88,6 +88,7 @@ void strset__free(struct strset *set)
 
        hashmap__free(set->strs_hash);
        free(set->strs_data);
+       free(set);
 }
 
 size_t strset__data_size(const struct strset *set)
index c67c833991708547dc06f9d843bde8d7965be908..ce91a582f0e4114195cf6006d53639accf1703bd 100644 (file)
@@ -40,7 +40,7 @@ static int test_stat_cpu(void)
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_TASK_CLOCK,
        };
-       int err, cpu, tmp;
+       int err, idx;
 
        cpus = perf_cpu_map__new(NULL);
        __T("failed to create cpus", cpus);
@@ -70,10 +70,10 @@ static int test_stat_cpu(void)
        perf_evlist__for_each_evsel(evlist, evsel) {
                cpus = perf_evsel__cpus(evsel);
 
-               perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+               for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
                        struct perf_counts_values counts = { .val = 0 };
 
-                       perf_evsel__read(evsel, cpu, 0, &counts);
+                       perf_evsel__read(evsel, idx, 0, &counts);
                        __T("failed to read value for evsel", counts.val != 0);
                }
        }
index a184e4861627e73999c03ec50073fd6c92203891..33ae9334861a242ab59a68979d32ca3b7ea8aa68 100644 (file)
@@ -22,7 +22,7 @@ static int test_stat_cpu(void)
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
        };
-       int err, cpu, tmp;
+       int err, idx;
 
        cpus = perf_cpu_map__new(NULL);
        __T("failed to create cpus", cpus);
@@ -33,10 +33,10 @@ static int test_stat_cpu(void)
        err = perf_evsel__open(evsel, cpus, NULL);
        __T("failed to open evsel", err == 0);
 
-       perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+       for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
                struct perf_counts_values counts = { .val = 0 };
 
-               perf_evsel__read(evsel, cpu, 0, &counts);
+               perf_evsel__read(evsel, idx, 0, &counts);
                __T("failed to read value for evsel", counts.val != 0);
        }
 
@@ -148,6 +148,7 @@ static int test_stat_user_read(int event)
        __T("failed to mmap evsel", err == 0);
 
        pc = perf_evsel__mmap_base(evsel, 0, 0);
+       __T("failed to get mmapped address", pc);
 
 #if defined(__i386__) || defined(__x86_64__)
        __T("userspace counter access not supported", pc->cap_user_rdpmc);
index bc821056aba902e5630076f61273fdc5fb1c83cb..0893436cc09f84c835bb044c6dd78cfaa14920d7 100644 (file)
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
        sec = find_section_by_name(elf, ".altinstructions");
        if (!sec) {
                sec = elf_create_section(elf, ".altinstructions",
-                                        SHF_ALLOC, size, 0);
+                                        SHF_ALLOC, 0, 0);
 
                if (!sec) {
                        WARN_ELF("elf_create_section");
index e5947fbb9e7a6d602686af947c5caf1f2158157f..06b5c164ae9318150aacdfd23a9a0fd90df49e39 100644 (file)
@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
                    !strcmp(sec->name, ".entry.text"))
                        sec->noinstr = true;
 
-               for (offset = 0; offset < sec->len; offset += insn->len) {
+               for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
                        insn = malloc(sizeof(*insn));
                        if (!insn) {
                                WARN("malloc failed");
@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
                        insn->offset = offset;
 
                        ret = arch_decode_instruction(file->elf, sec, offset,
-                                                     sec->len - offset,
+                                                     sec->sh.sh_size - offset,
                                                      &insn->len, &insn->type,
                                                      &insn->immediate,
                                                      &insn->stack_ops);
@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
 {
        struct instruction *insn = NULL;
        unsigned int offset;
-       unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
+       unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
 
-       for (offset = sec->len - 1; offset >= end && !insn; offset--)
+       for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
                insn = find_insn(file, sec, offset);
 
        return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find unreachable insn at %s+0x%x",
@@ -424,7 +424,7 @@ reachable:
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find reachable insn at %s+0x%x",
@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
                return -1;
        }
 
-       if (sec->len % sizeof(struct unwind_hint)) {
+       if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
                WARN("struct unwind_hint size mismatch");
                return -1;
        }
 
        file->hints = true;
 
-       for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+       for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
                hint = (struct unwind_hint *)sec->data->d_buf + i;
 
                reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
index 8676c759872810d1e7cff3f7e2c5adc061164700..fee03b744a6ea698ee8ea44ca2368f4555a7d804 100644 (file)
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
                                return -1;
                        }
                }
-               sec->len = sec->sh.sh_size;
 
                if (sec->sh.sh_flags & SHF_EXECINSTR)
-                       elf->text_size += sec->len;
+                       elf->text_size += sec->sh.sh_size;
 
                list_add_tail(&sec->list, &elf->sections);
                elf_hash_add(section, &sec->hash, sec->idx);
@@ -509,6 +508,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
        list_add_tail(&reloc->list, &sec->reloc->reloc_list);
        elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
 
+       sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
        sec->reloc->changed = true;
 
        return 0;
@@ -734,8 +734,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
        data->d_size = strlen(str) + 1;
        data->d_align = 1;
 
-       len = strtab->len;
-       strtab->len += data->d_size;
+       len = strtab->sh.sh_size;
+       strtab->sh.sh_size += data->d_size;
        strtab->changed = true;
 
        return len;
@@ -790,9 +790,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        data->d_align = 1;
        data->d_type = ELF_T_SYM;
 
-       sym->idx = symtab->len / sizeof(sym->sym);
+       sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
 
-       symtab->len += data->d_size;
+       symtab->sh.sh_size += data->d_size;
        symtab->changed = true;
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -814,7 +814,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
                data->d_align = 4;
                data->d_type = ELF_T_WORD;
 
-               symtab_shndx->len += 4;
+               symtab_shndx->sh.sh_size += 4;
                symtab_shndx->changed = true;
        }
 
@@ -855,7 +855,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        }
 
        sec->idx = elf_ndxscn(s);
-       sec->len = size;
        sec->changed = true;
 
        sec->data = elf_newdata(s);
@@ -979,63 +978,63 @@ static struct section *elf_create_reloc_section(struct elf *elf,
        }
 }
 
-static int elf_rebuild_rel_reloc_section(struct section *sec, int nr)
+static int elf_rebuild_rel_reloc_section(struct section *sec)
 {
        struct reloc *reloc;
-       int idx = 0, size;
+       int idx = 0;
        void *buf;
 
        /* Allocate a buffer for relocations */
-       size = nr * sizeof(GElf_Rel);
-       buf = malloc(size);
+       buf = malloc(sec->sh.sh_size);
        if (!buf) {
                perror("malloc");
                return -1;
        }
 
        sec->data->d_buf = buf;
-       sec->data->d_size = size;
+       sec->data->d_size = sec->sh.sh_size;
        sec->data->d_type = ELF_T_REL;
 
-       sec->sh.sh_size = size;
-
        idx = 0;
        list_for_each_entry(reloc, &sec->reloc_list, list) {
                reloc->rel.r_offset = reloc->offset;
                reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
-               gelf_update_rel(sec->data, idx, &reloc->rel);
+               if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
+                       WARN_ELF("gelf_update_rel");
+                       return -1;
+               }
                idx++;
        }
 
        return 0;
 }
 
-static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
+static int elf_rebuild_rela_reloc_section(struct section *sec)
 {
        struct reloc *reloc;
-       int idx = 0, size;
+       int idx = 0;
        void *buf;
 
        /* Allocate a buffer for relocations with addends */
-       size = nr * sizeof(GElf_Rela);
-       buf = malloc(size);
+       buf = malloc(sec->sh.sh_size);
        if (!buf) {
                perror("malloc");
                return -1;
        }
 
        sec->data->d_buf = buf;
-       sec->data->d_size = size;
+       sec->data->d_size = sec->sh.sh_size;
        sec->data->d_type = ELF_T_RELA;
 
-       sec->sh.sh_size = size;
-
        idx = 0;
        list_for_each_entry(reloc, &sec->reloc_list, list) {
                reloc->rela.r_offset = reloc->offset;
                reloc->rela.r_addend = reloc->addend;
                reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
-               gelf_update_rela(sec->data, idx, &reloc->rela);
+               if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
+                       WARN_ELF("gelf_update_rela");
+                       return -1;
+               }
                idx++;
        }
 
@@ -1044,16 +1043,9 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
 
 static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
 {
-       struct reloc *reloc;
-       int nr;
-
-       nr = 0;
-       list_for_each_entry(reloc, &sec->reloc_list, list)
-               nr++;
-
        switch (sec->sh.sh_type) {
-       case SHT_REL:  return elf_rebuild_rel_reloc_section(sec, nr);
-       case SHT_RELA: return elf_rebuild_rela_reloc_section(sec, nr);
+       case SHT_REL:  return elf_rebuild_rel_reloc_section(sec);
+       case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
        default:       return -1;
        }
 }
@@ -1113,12 +1105,6 @@ int elf_write(struct elf *elf)
        /* Update changed relocation sections and section headers: */
        list_for_each_entry(sec, &elf->sections, list) {
                if (sec->changed) {
-                       if (sec->base &&
-                           elf_rebuild_reloc_section(elf, sec)) {
-                               WARN("elf_rebuild_reloc_section");
-                               return -1;
-                       }
-
                        s = elf_getscn(elf->elf, sec->idx);
                        if (!s) {
                                WARN_ELF("elf_getscn");
@@ -1129,6 +1115,12 @@ int elf_write(struct elf *elf)
                                return -1;
                        }
 
+                       if (sec->base &&
+                           elf_rebuild_reloc_section(elf, sec)) {
+                               WARN("elf_rebuild_reloc_section");
+                               return -1;
+                       }
+
                        sec->changed = false;
                        elf->changed = true;
                }
index e34395047530996207733546dbb38b03ed413510..075d8291b8546bda92af6e735cf58ad202c27069 100644 (file)
@@ -38,7 +38,6 @@ struct section {
        Elf_Data *data;
        char *name;
        int idx;
-       unsigned int len;
        bool changed, text, rodata, noinstr;
 };
 
index dc9b7dd314b05fbc0780e5699931476cdfdbe6c6..b5865e2450cbb5568446e4a4aad53d14f4d724d6 100644 (file)
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)
 
                /* Add a section terminator */
                if (!empty) {
-                       orc_list_add(&orc_list, &null, sec, sec->len);
+                       orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
                        nr++;
                }
        }
index bc925cf19e2dee7a3087829f4800ac71e0c6f6ca..06c3eacab3d53453706e795b01a02b781838a189 100644 (file)
@@ -58,6 +58,13 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
 {
 }
 
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+                            unsigned long *off)
+{
+       *sec = reloc->sym->sec;
+       *off = reloc->sym->offset + reloc->addend;
+}
+
 static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                         struct section *sec, int idx,
                         struct special_alt *alt)
@@ -91,14 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
                return -1;
        }
-       if (orig_reloc->sym->type != STT_SECTION) {
-               WARN_FUNC("don't know how to handle non-section reloc symbol %s",
-                          sec, offset + entry->orig, orig_reloc->sym->name);
-               return -1;
-       }
 
-       alt->orig_sec = orig_reloc->sym->sec;
-       alt->orig_off = orig_reloc->addend;
+       reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
 
        if (!entry->group || alt->new_len) {
                new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
@@ -116,8 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                if (arch_is_retpoline(new_reloc->sym))
                        return 1;
 
-               alt->new_sec = new_reloc->sym->sec;
-               alt->new_off = (unsigned int)new_reloc->addend;
+               reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
 
                /* _ASM_EXTABLE_EX hack */
                if (alt->new_off >= 0x7ffffff0)
@@ -159,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
                if (!sec)
                        continue;
 
-               if (sec->len % entry->size != 0) {
+               if (sec->sh.sh_size % entry->size != 0) {
                        WARN("%s size not a multiple of %d",
                             sec->name, entry->size);
                        return -1;
                }
 
-               nr_entries = sec->len / entry->size;
+               nr_entries = sec->sh.sh_size / entry->size;
 
                for (idx = 0; idx < nr_entries; idx++) {
                        alt = malloc(sizeof(*alt));
index 52152d156ad907e22c430e7ac48ef64c42f6032f..79936355d819a66969b90adcbd6e427a4c26b0a5 100644 (file)
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
 The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
 
 
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
 
 
 NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
index de6beedb72834b45e29175b674d6e49a2ab0a49d..3b6a2c84ea027e955ba6cb7e5ba290f0d876a5b6 100644 (file)
@@ -261,7 +261,7 @@ COALESCE
 User can specify how to sort offsets for cacheline.
 
 Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
 
   tid   - coalesced by process TIDs
   pid   - coalesced by process PIDs
index 184ba62420f099707370e7db674caf7900fe3254..db465fa7ee918a19f39c2fbd03b451f3f9adffa8 100644 (file)
@@ -883,7 +883,7 @@ and "r" can be combined to get calls and returns.
 
 "Transactions" events correspond to the start or end of transactions. The
 'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
 
 Note that "instructions", "branches" and "transactions" events depend on code
 flow packets which can be disabled by using the config term "branch=0".  Refer
index 74d7745921968b3a7286311f740d3f5aab0315e2..1b4d452923d7e2327f378b2922cdc7ae4856a69d 100644 (file)
@@ -44,7 +44,7 @@ COMMON OPTIONS
 
 -f::
 --force::
-       Don't complan, do it.
+       Don't complain, do it.
 
 REPORT OPTIONS
 --------------
index 5a1f68122f50aaaa13baa1d8491a40dbdce92f47..fa4f39d305a7d576107bd49f38d1a6c91819f98c 100644 (file)
@@ -54,7 +54,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 0250dc61cf98266898b9444db332402608f93447..cf4b7f4b625adc7a3c1bc58076f395bbc308bf2f 100644 (file)
@@ -448,7 +448,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 4c9310be6acc9089dae38c45f5f1ce42ed058b7e..7e6fb7cbc0f423b09bdb1fb3f5dd243bbe2cd973 100644 (file)
@@ -385,7 +385,7 @@ Aggregate counts per physical processor for system-wide mode measurements.
 Print metrics or metricgroups specified in a comma separated list.
 For a group all metrics from the group are added.
 The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
 
 -A::
 --no-aggr::
index c6302df4cf29b9afbad34cc6165a29d15b8aa51d..a15b93fdcf50f910d596ec00dac712837154859d 100644 (file)
@@ -2,7 +2,7 @@ Using TopDown metrics in user space
 -----------------------------------
 
 Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown
-methology to break down CPU pipeline execution into 4 bottlenecks:
+methodology to break down CPU pipeline execution into 4 bottlenecks:
 frontend bound, backend bound, bad speculation, retiring.
 
 For more details on Topdown see [1][5]
index 446180401e267f27cc35852572b77131e3353fe5..14e3e8d702a023df558d14d57def0aa37db83d42 100644 (file)
@@ -143,7 +143,7 @@ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
 ifdef CSINCLUDES
   LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
 endif
-OPENCSDLIBS := -lopencsd_c_api -lopencsd
+OPENCSDLIBS := -lopencsd_c_api -lopencsd -lstdc++
 ifdef CSLIBS
   LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
 endif
index e04313c4d8409a9429fcd7d8080610d73441b783..5cd702062a042630bbd244cf9d188e4f9118d81d 100644 (file)
@@ -802,7 +802,7 @@ endif
 
 $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
 
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(filter-out -static,$(LDFLAGS))'
 
 $(LIBTRACEEVENT): FORCE
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
index c7c7ec0812d5aef4c4399ca3e86ecdf69d0c9815..5fc6a2a3dbc5fd77215047d7d2c5e8df92f165c4 100644 (file)
@@ -8,10 +8,10 @@
 #include <linux/coresight-pmu.h>
 #include <linux/zalloc.h>
 
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
 #include "cs-etm.h"
 #include "arm-spe.h"
 
index 515aae470e23b0cf601d0711ecb355adcbcda043..293a23bf8be39a25a6a3813f60e082e62062bbb3 100644 (file)
 #include <linux/zalloc.h>
 
 #include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/perf_api_probe.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
 #include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
 
 #include <errno.h>
 #include <stdlib.h>
index 2864e2e3776d5105d39f83f4de94d306df4a07ac..2833e101a7c6407263130e9948a06a2caa32bc4b 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index bbc297a7e2e3517059f9c3b33816deb69074ac20..b8b23b9dc5987a4dc118ee7bc45b162d08a57111 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/string.h>
 
 #include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr
 *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
index 36ba4c69c3c55cfec6c33a4b5cd52ee31d36f538..b7692cb0c73398e4f8c6bc6df4ebd7c83359fd3f 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
 
 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
 {
index 3a550225dfafd37d579ba0a464e9ec067485660d..438906bf0014abb75d16db98ac304a20e7e5fb1b 100644 (file)
@@ -3,8 +3,8 @@
 #include <errno.h>
 #include <libunwind.h>
 #include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
 
 int libunwind__arch_reg_id(int regnum)
 {
index eeafe97b8105bbd0b1c36cf0b8393624d88eb48e..792cd75ade33d8707dd9dcab36962d5244c0977b 100644 (file)
@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
        u8 die = ((struct iio_root_port *)evsel->priv)->die;
        struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
 
-       if (count->run && count->ena) {
+       if (count && count->run && count->ena) {
                if (evsel->prev_raw_counts && !out->force_header) {
                        struct perf_counts_values *prev_count =
                                perf_counts(evsel->prev_raw_counts, die, 0);
index f6e87b7be5fa22dd1c628b0c76df923bb981eb3b..f0ecfda34ecebaccf8e85e029f9fce50379bdf29 100644 (file)
@@ -2408,6 +2408,8 @@ int cmd_stat(int argc, const char **argv)
                        goto out;
                } else if (verbose)
                        iostat_list(evsel_list, &stat_config);
+               if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+                       target.system_wide = true;
        }
 
        if (add_default_attributes())
index 84a0cedf1fd9bcfb7bf648a48640320a578fe18b..f1f2965f6775cad71bb0d47f3b15a9064d9c3b75 100644 (file)
   {
     "EventCode": "0x4e010",
     "EventName": "PM_GCT_NOSLOT_IC_L3MISS",
-    "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+    "BriefDescription": "Gct empty for this thread due to icache l3 miss",
     "PublicDescription": ""
   },
   {
index 6731b3cf0c2fc9b7c353f582c06fb186dfb847f4..7c887d37b893466ae84fa7d82c646cd1c63f0192 100644 (file)
@@ -1285,6 +1285,7 @@ int main(int argc, char *argv[])
        }
 
        free_arch_std_events();
+       free_sys_event_tables();
        free(mapfile);
        return 0;
 
@@ -1306,6 +1307,7 @@ err_close_eventsfp:
                create_empty_mapping(output_file);
 err_out:
        free_arch_std_events();
+       free_sys_event_tables();
        free(mapfile);
        return ret;
 }
index d9e99b3f77e66e707cca6c8e115120b0105c84b3..d8ea6a88163fbd1405cd072ad6cd94d4b68e029e 100644 (file)
@@ -68,3 +68,100 @@ fd=10
 type=0
 config=5
 optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
index 8b04a055d1548d03a84c53ad719e249eeb807197..b656ab93c5bf910ded52eebca2fa7f985d7b6221 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
index 4fca9f1bfbf8d36932b814d70750c6e826c9cc05..97625090a1c4c299b3ee528b4c084b862c3cfab0 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
@@ -114,8 +211,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
 type=3
 config=1
 optional=1
@@ -124,8 +221,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
 type=3
 config=65537
 optional=1
@@ -134,8 +231,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
 type=3
 config=3
 optional=1
@@ -144,8 +241,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
 type=3
 config=65539
 optional=1
@@ -154,8 +251,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
 type=3
 config=4
 optional=1
@@ -164,8 +261,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
 type=3
 config=65540
 optional=1
index 4bb58e1c82a676c7b5b7c7ab411f6d93f424864e..d555042e3fbfe6f978a074d073f35adbce333c6b 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
@@ -114,8 +211,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
 type=3
 config=1
 optional=1
@@ -124,8 +221,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
 type=3
 config=65537
 optional=1
@@ -134,8 +231,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
 type=3
 config=3
 optional=1
@@ -144,8 +241,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
 type=3
 config=65539
 optional=1
@@ -154,8 +251,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
 type=3
 config=4
 optional=1
@@ -164,8 +261,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
 type=3
 config=65540
 optional=1
@@ -174,8 +271,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event21:base-stat]
-fd=21
+[event30:base-stat]
+fd=30
 type=3
 config=512
 optional=1
@@ -184,8 +281,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event22:base-stat]
-fd=22
+[event31:base-stat]
+fd=31
 type=3
 config=66048
 optional=1
index 9866cddebf237c2fdf8b9c1d28501e9a4b277962..9b4a765e4b7340d8a58ef6a2afd26584a6759a8c 100644 (file)
@@ -229,8 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
 {
        struct addr_location al;
-       unsigned char buf1[BUFSZ];
-       unsigned char buf2[BUFSZ];
+       unsigned char buf1[BUFSZ] = {0};
+       unsigned char buf2[BUFSZ] = {0};
        size_t ret_len;
        u64 objdump_addr;
        const char *objdump_name;
index a288035eb36269bcb9d062df3e648faf7ed7bb2c..c756284b3b1353292b677d7f32b923877515c56e 100644 (file)
 /* For bsearch. We try to unwind functions in shared object. */
 #include <stdlib.h>
 
+/*
+ * The test will assert frames are on the stack but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
 static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        return strcmp((const char *) symbol, funcs[idx]);
 }
 
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
 {
        struct perf_sample sample;
        unsigned long cnt = 0;
@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
 
 static int global_unwind_retval = -INT_MAX;
 
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
 {
        /* Any possible value should be 'thread' */
        struct thread *thread = *(struct thread **)p1;
@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
        return p1 - p2;
 }
 
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
 {
        struct thread *array[2] = {thread, thread};
        void *fp = &bsearch;
@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
        return global_unwind_retval;
 }
 
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_3(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_3(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_2(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_2(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
 int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
index 4fb5e90d7a57ae481873b2d632dc60a992ea165d..60ce5908c6640022c472acc6d0611195a4c93284 100644 (file)
@@ -801,7 +801,7 @@ int perf_config_set(struct perf_config_set *set,
                                  section->name, item->name);
                        ret = fn(key, value, data);
                        if (ret < 0) {
-                               pr_err("Error: wrong config key-value pair %s=%s\n",
+                               pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
                                       key, value);
                                /*
                                 * Can't be just a 'break', as perf_config_set__for_each_entry()
index 069c2cfdd3be6a7405196f697de057ea5377f9fa..352f16076e01f70ef91ebebdd7809d8cf4c0c05f 100644 (file)
@@ -2116,7 +2116,7 @@ fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
 static int __perf_session__process_decomp_events(struct perf_session *session)
 {
        s64 skip;
-       u64 size, file_pos = 0;
+       u64 size;
        struct decomp *decomp = session->decomp_last;
 
        if (!decomp)
@@ -2132,7 +2132,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
                size = event->header.size;
 
                if (size < sizeof(struct perf_event_header) ||
-                   (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+                   (skip = perf_session__process_event(session, event, decomp->file_pos)) < 0) {
                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                                decomp->file_pos + decomp->head, event->header.size, event->header.type);
                        return -EINVAL;
index 5a931456e71851c3a799e2a348e968ec666d484e..ac35c61f65f5fd43457957f178f3ca72721bfedc 100755 (executable)
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
 
 from collections import namedtuple
 from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
 
 import kunit_config
 import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+               '--json': 'stdout',
+               '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+       def massage_arg(arg: str) -> str:
+               if arg not in pseudo_bool_flag_defaults:
+                       return arg
+               return  f'{arg}={pseudo_bool_flag_defaults[arg]}'
+       return list(map(massage_arg, argv))
+
 def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
                                  help='Specifies the file to read results from.',
                                  type=str, nargs='?', metavar='input_file')
 
-       cli_args = parser.parse_args(argv)
+       cli_args = parser.parse_args(massage_argv(argv))
 
        if get_kernel_root_path():
                os.chdir(get_kernel_root_path())
index 619c4554cbff247bc1f2c9c6cb61f01c61d6b747..1edcc8373b4e3195a5b88634bcb45964381abcc2 100755 (executable)
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
                        self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
                        self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
 
+       def test_run_raw_output_does_not_take_positional_args(self):
+               # --raw_output is a string flag, but we don't want it to consume
+               # any positional arguments, only ones after an '='
+               self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+               kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+               self.linux_source_mock.run_kernel.assert_called_once_with(
+                       args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
        def test_exec_timeout(self):
                timeout = 3453
                kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
index 866531c08e4f9da1e892b1c7cd15e24552774b81..799b88152e9e8d95fbd89da55f77a7b3247aba65 100644 (file)
@@ -375,7 +375,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
                     $(TRUNNER_BPF_PROGS_DIR)/%.c                       \
                     $(TRUNNER_BPF_PROGS_DIR)/*.h                       \
                     $$(INCLUDE_DIR)/vmlinux.h                          \
-                    $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
+                    $(wildcard $(BPFDIR)/bpf_*.h)                      \
+                    | $(TRUNNER_OUTPUT) $$(BPFOBJ)
        $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@,                      \
                                          $(TRUNNER_BPF_CFLAGS))
 
index 59ea56945e6cd6819dc12fe337f324d784449e48..b497bb85b667f78f0bfa393e5ffaca0766b345f0 100755 (executable)
@@ -112,6 +112,14 @@ setup()
        ip netns add "${NS2}"
        ip netns add "${NS3}"
 
+       # rp_filter gets confused by what these tests are doing, so disable it
+       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
        ip link add veth1 type veth peer name veth2
        ip link add veth3 type veth peer name veth4
        ip link add veth5 type veth peer name veth6
@@ -236,11 +244,6 @@ setup()
        ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
        ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
 
-       # rp_filter gets confused by what these tests are doing, so disable it
-       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
-
        TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
 
        sleep 1  # reduce flakiness
index 4de902ea14d82be9c06908e62e5d9bdb771a6279..de1c4e6de0b2110ce6c91bf248ae8e946d4b6105 100644 (file)
@@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <string.h>
 #include <errno.h>
-#include <linux/fcntl.h>
+#include <fcntl.h>
 #include <malloc.h>
 
 #include <sys/ioctl.h>
index 5f5b2ba3e5572a37eb2afe53f2196423978bb711..60c02b482be8341ce795cab1238cc4772ebb942e 100644 (file)
@@ -11,8 +11,8 @@ SYSTEM="syscalls"
 EVENT="sys_enter_openat"
 FIELD="filename"
 EPROBE="eprobe_open"
-
-echo "e:$EPROBE $SYSTEM/$EVENT file=+0(\$filename):ustring" >> dynamic_events
+OPTIONS="file=+0(\$filename):ustring"
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
 
 grep -q "$EPROBE" dynamic_events
 test -d events/eprobes/$EPROBE
@@ -37,4 +37,54 @@ echo "-:$EPROBE" >> dynamic_events
 ! grep -q "$EPROBE" dynamic_events
 ! test -d events/eprobes/$EPROBE
 
+# test various ways to remove the probe (already tested with just event name)
+
+# With group name
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# Finally make sure what is in the dynamic_events file clears it too
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+LINE=`sed -e '/$EPROBE/s/^e/-/' < dynamic_events`
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
 clear_trace
index 618bf9bc7f3fd17ee4214d0dde2455323bb3a36c..b8dbabe24ac226eed4e9e96155b5390a1d3a88aa 100644 (file)
@@ -24,6 +24,7 @@
 /x86_64/smm_test
 /x86_64/state_test
 /x86_64/svm_vmcall_test
+/x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_msr_exit_test
index 9ac325cfc94a2cab51f2d2db495aee11fd2d83d9..d1774f4613939702723d6f898915c05484bd5d1b 100644 (file)
@@ -56,6 +56,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
index 71e277c7c3f3338d11796022bda66394e170d0fd..5d95113c7b7c540842d5df93cfcef16658765e82 100644 (file)
@@ -371,9 +371,7 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
        exit(0);
 }
@@ -381,7 +379,7 @@ static void help(char *name)
 int main(int argc, char *argv[])
 {
        struct test_params params = {
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
                .vcpus = 1,
        };
index e79c1b64977f146465345020dad34424b69ba78a..1510b21e6306143b202581d76415451545332797 100644 (file)
@@ -179,7 +179,7 @@ static void *uffd_handler_thread_fn(void *arg)
                        return NULL;
                }
 
-               if (!pollfd[0].revents & POLLIN)
+               if (!(pollfd[0].revents & POLLIN))
                        continue;
 
                r = read(uffd, &msg, sizeof(msg));
@@ -416,7 +416,7 @@ static void help(char *name)
 {
        puts("");
        printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
-              "          [-b memory] [-t type] [-v vcpus] [-o]\n", name);
+              "          [-b memory] [-s type] [-v vcpus] [-o]\n", name);
        guest_modes_help();
        printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
               "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
@@ -426,8 +426,7 @@ static void help(char *name)
        printf(" -b: specify the size of the memory region which should be\n"
               "     demand paged by each vCPU. e.g. 10M or 3G.\n"
               "     Default: 1G\n");
-       printf(" -t: The type of backing memory to use. Default: anonymous\n");
-       backing_src_help();
+       backing_src_help("-s");
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
@@ -439,14 +438,14 @@ int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
                .partition_vcpu_memory_access = true,
        };
        int opt;
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
+       while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
@@ -465,7 +464,7 @@ int main(int argc, char *argv[])
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
-               case 't':
+               case 's':
                        p.src_type = parse_backing_src_type(optarg);
                        break;
                case 'v':
@@ -485,7 +484,7 @@ int main(int argc, char *argv[])
 
        if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
            !backing_src_is_shared(p.src_type)) {
-               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
        }
 
        for_each_guest_mode(run_test, &p);
index 479868570d593745c07e13b28a96ac569c88e7f1..7ffab5bd5ce55a59da6e128e226847cf0c765353 100644 (file)
@@ -118,42 +118,64 @@ static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
        toggle_dirty_logging(vm, slots, false);
 }
 
-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                         uint64_t nr_pages)
+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_get_dirty_log(vm, slot, slot_bitmap);
+               kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
        }
 }
 
-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                           uint64_t nr_pages)
+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+                           int slots, uint64_t pages_per_slot)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);
+               kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
        }
 }
 
+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+       unsigned long **bitmaps;
+       int i;
+
+       bitmaps = malloc(slots * sizeof(bitmaps[0]));
+       TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+       for (i = 0; i < slots; i++) {
+               bitmaps[i] = bitmap_zalloc(pages_per_slot);
+               TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+       }
+
+       return bitmaps;
+}
+
+static void free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+       int i;
+
+       for (i = 0; i < slots; i++)
+               free(bitmaps[i]);
+
+       free(bitmaps);
+}
+
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
        pthread_t *vcpu_threads;
        struct kvm_vm *vm;
-       unsigned long *bmap;
+       unsigned long **bitmaps;
        uint64_t guest_num_pages;
        uint64_t host_num_pages;
+       uint64_t pages_per_slot;
        int vcpu_id;
        struct timespec start;
        struct timespec ts_diff;
@@ -171,7 +193,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
-       bmap = bitmap_zalloc(host_num_pages);
+       pages_per_slot = host_num_pages / p->slots;
+
+       bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
 
        if (dirty_log_manual_caps) {
                cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -239,7 +263,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
                clock_gettime(CLOCK_MONOTONIC, &start);
-               get_dirty_log(vm, p->slots, bmap, host_num_pages);
+               get_dirty_log(vm, bitmaps, p->slots);
                ts_diff = timespec_elapsed(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
                                                   ts_diff);
@@ -248,7 +272,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
-                       clear_dirty_log(vm, p->slots, bmap, host_num_pages);
+                       clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
                        ts_diff = timespec_elapsed(start);
                        clear_dirty_log_total = timespec_add(clear_dirty_log_total,
                                                             ts_diff);
@@ -281,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }
 
-       free(bmap);
+       free_bitmaps(bitmaps, p->slots);
        free(vcpu_threads);
        perf_test_destroy_vm(vm);
 }
@@ -308,11 +332,9 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
+       backing_src_help("-s");
        printf(" -x: Split the memory region into this number of memslots.\n"
-              "     (default: 1)");
-       backing_src_help();
+              "     (default: 1)\n");
        puts("");
        exit(0);
 }
@@ -324,7 +346,7 @@ int main(int argc, char *argv[])
                .iterations = TEST_HOST_LOOP_N,
                .wr_fract = 1,
                .partition_vcpu_memory_access = true,
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .slots = 1,
        };
        int opt;
index 451fed5ce8e721cca01c66e0c22dd0711d304588..f8fddc84c0d3b3b18e5a5d791ae8f5da5ed6f759 100644 (file)
@@ -90,6 +90,8 @@ enum vm_mem_backing_src_type {
        NUM_SRC_TYPES,
 };
 
+#define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS
+
 struct vm_mem_backing_src_alias {
        const char *name;
        uint32_t flag;
@@ -102,7 +104,7 @@ size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
 size_t get_backing_src_pagesz(uint32_t i);
-void backing_src_help(void);
+void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
 
index 242ae8e09a653a8362959a03b099cd4f620ff864..05e65ca1c30cda841a66b9ee7886b99656c070bf 100644 (file)
@@ -312,37 +312,37 @@ static inline void set_xmm(int n, unsigned long val)
        }
 }
 
-typedef unsigned long v1di __attribute__ ((vector_size (8)));
+#define GET_XMM(__xmm)                                                 \
+({                                                                     \
+       unsigned long __val;                                            \
+       asm volatile("movq %%"#__xmm", %0" : "=r"(__val));              \
+       __val;                                                          \
+})
+
 static inline unsigned long get_xmm(int n)
 {
        assert(n >= 0 && n <= 7);
 
-       register v1di xmm0 __asm__("%xmm0");
-       register v1di xmm1 __asm__("%xmm1");
-       register v1di xmm2 __asm__("%xmm2");
-       register v1di xmm3 __asm__("%xmm3");
-       register v1di xmm4 __asm__("%xmm4");
-       register v1di xmm5 __asm__("%xmm5");
-       register v1di xmm6 __asm__("%xmm6");
-       register v1di xmm7 __asm__("%xmm7");
        switch (n) {
        case 0:
-               return (unsigned long)xmm0;
+               return GET_XMM(xmm0);
        case 1:
-               return (unsigned long)xmm1;
+               return GET_XMM(xmm1);
        case 2:
-               return (unsigned long)xmm2;
+               return GET_XMM(xmm2);
        case 3:
-               return (unsigned long)xmm3;
+               return GET_XMM(xmm3);
        case 4:
-               return (unsigned long)xmm4;
+               return GET_XMM(xmm4);
        case 5:
-               return (unsigned long)xmm5;
+               return GET_XMM(xmm5);
        case 6:
-               return (unsigned long)xmm6;
+               return GET_XMM(xmm6);
        case 7:
-               return (unsigned long)xmm7;
+               return GET_XMM(xmm7);
        }
+
+       /* never reached */
        return 0;
 }
 
index 0d04a7db7f249324857e8c41671060da9bb4099f..36407cb0ec85dcb62277f27215a19e2a285bb0f2 100644 (file)
@@ -456,10 +456,7 @@ static void help(char *name)
               "     (default: 1G)\n");
        printf(" -v: specify the number of vCPUs to run\n"
               "     (default: 1)\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n"
-              "     (default: anonymous)\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
 }
 
@@ -468,7 +465,7 @@ int main(int argc, char *argv[])
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
                .test_mem_size = DEFAULT_TEST_MEM_SIZE,
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
        };
        int opt;
 
index a9107bfae4021ab0dfd8259c4310ffb2eb7502e2..b724291089939380bd0e8fa6093d8ef5ad339092 100644 (file)
@@ -283,13 +283,22 @@ size_t get_backing_src_pagesz(uint32_t i)
        }
 }
 
-void backing_src_help(void)
+static void print_available_backing_src_types(const char *prefix)
 {
        int i;
 
-       printf("Available backing src types:\n");
+       printf("%sAvailable backing src types:\n", prefix);
+
        for (i = 0; i < NUM_SRC_TYPES; i++)
-               printf("\t%s\n", vm_mem_backing_src_alias(i)->name);
+               printf("%s    %s\n", prefix, vm_mem_backing_src_alias(i)->name);
+}
+
+void backing_src_help(const char *flag)
+{
+       printf(" %s: specify the type of memory that should be used to\n"
+              "     back the guest data region. (default: %s)\n",
+              flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);
+       print_available_backing_src_types("     ");
 }
 
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
@@ -300,7 +309,7 @@ enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
                if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
                        return i;
 
-       backing_src_help();
+       print_available_backing_src_types("");
        TEST_FAIL("Unknown backing src type: %s", type_name);
        return -1;
 }
index 060538bd405a98af2ebed09e01622308af1afe2b..4158da0da2bba8e5e8ea12acd87f6cff6206e3ae 100644 (file)
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
        TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+       /*
+        * Advance to the next CPU, skipping those that weren't in the original
+        * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+        * data storage is considered as opaque.  Note, if this task is pinned
+        * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop
+        * will burn a lot of cycles and the test will take longer than normal to
+        * complete.
+        */
+       do {
+               cpu++;
+               if (cpu > max_cpu) {
+                       cpu = min_cpu;
+                       TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+                                   "Min CPU = %d must always be usable", cpu);
+                       break;
+               }
+       } while (!CPU_ISSET(cpu, &possible_mask));
+
+       return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
        cpu_set_t allowed_mask;
-       int r, i, nr_cpus, cpu;
+       int r, i, cpu;
 
        CPU_ZERO(&allowed_mask);
 
-       nr_cpus = CPU_COUNT(&possible_mask);
-
-       for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-               cpu = i % nr_cpus;
-               if (!CPU_ISSET(cpu, &possible_mask))
-                       continue;
-
+       for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
                CPU_SET(cpu, &allowed_mask);
 
                /*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
        return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+       int i, cnt, nproc;
+
+       if (CPU_COUNT(&possible_mask) < 2)
+               return -EINVAL;
+
+       /*
+        * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+        * this task is affined to in order to reduce the time spent querying
+        * unusable CPUs, e.g. if this task is pinned to a small percentage of
+        * total CPUs.
+        */
+       nproc = get_nprocs_conf();
+       min_cpu = -1;
+       max_cpu = -1;
+       cnt = 0;
+
+       for (i = 0; i < nproc; i++) {
+               if (!CPU_ISSET(i, &possible_mask))
+                       continue;
+               if (min_cpu == -1)
+                       min_cpu = i;
+               max_cpu = i;
+               cnt++;
+       }
+
+       return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
        int r, i, snapshot;
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
        TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
                    strerror(errno));
 
-       if (CPU_COUNT(&possible_mask) < 2) {
-               print_skip("Only one CPU, task migration not possible\n");
+       if (calc_min_max_cpu()) {
+               print_skip("Only one usable CPU, task migration not possible");
                exit(KSFT_SKIP);
        }
 
@@ -180,6 +229,7 @@ int main(int argc, char *argv[])
         * CPU affinity.
         */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
+       ucall_init(vm, NULL);
 
        pthread_create(&migration_thread, NULL, migration_worker, 0);
 
index 2172d65b85e44870d16fa121a594a90a6f1e2d1e..62f2eb9ee3d5650ce825992f577ffb797e884f66 100644 (file)
@@ -116,12 +116,12 @@ struct st_time {
        uint64_t st_time;
 };
 
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
 {
        unsigned long ret;
 
        asm volatile(
-               "mov    x0, %1\n"
+               "mov    w0, %w1\n"
                "mov    x1, %2\n"
                "hvc    #0\n"
                "mov    %0, x0\n"
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
new file mode 100644 (file)
index 0000000..df04f56
--- /dev/null
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_int_ctl_test
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "apic.h"
+
+#define VCPU_ID                0
+
+static struct kvm_vm *vm;
+
+bool vintr_irq_called;
+bool intr_irq_called;
+
+#define VINTR_IRQ_NUMBER 0x20
+#define INTR_IRQ_NUMBER 0x30
+
+static void vintr_irq_handler(struct ex_regs *regs)
+{
+       vintr_irq_called = true;
+}
+
+static void intr_irq_handler(struct ex_regs *regs)
+{
+       x2apic_write_reg(APIC_EOI, 0x00);
+       intr_irq_called = true;
+}
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+       /* This code raises interrupt INTR_IRQ_NUMBER in the L1's LAPIC,
+        * and since L1 didn't enable virtual interrupt masking,
+        * L2 should receive it and not L1.
+        *
+        * L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ
+        * so it should also receive it after the following 'sti'.
+        */
+       x2apic_write_reg(APIC_ICR,
+               APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
+
+       __asm__ __volatile__(
+               "sti\n"
+               "nop\n"
+       );
+
+       GUEST_ASSERT(vintr_irq_called);
+       GUEST_ASSERT(intr_irq_called);
+
+       __asm__ __volatile__(
+               "vmcall\n"
+       );
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       struct vmcb *vmcb = svm->vmcb;
+
+       x2apic_enable();
+
+       /* Prepare for L2 execution. */
+       generic_svm_setup(svm, l2_guest_code,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /* No virtual interrupt masking */
+       vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+       /* No intercepts for real and virtual interrupts */
+       vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+
+       /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
+       vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
+       vmcb->control.int_vector = VINTR_IRQ_NUMBER;
+
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t svm_gva;
+
+       nested_svm_check_supported();
+
+       vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+       vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
+       vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
+
+       vcpu_alloc_svm(vm, &svm_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+
+       struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+       struct ucall uc;
+
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                   "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+                   run->exit_reason,
+                   exit_reason_str(run->exit_reason));
+
+       switch (get_ucall(vm, VCPU_ID, &uc)) {
+       case UCALL_ABORT:
+               TEST_FAIL("%s", (const char *)uc.args[0]);
+               break;
+               /* NOT REACHED */
+       case UCALL_DONE:
+               goto done;
+       default:
+               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+       }
+done:
+       kvm_vm_free(vm);
+       return 0;
+}
index 21b646d10b88f10e02315859ef67351f6ad0a29d..86ab429fe7f3bf506cf28c05ec6bec37cc11cdc0 100644 (file)
@@ -43,3 +43,4 @@ CONFIG_NET_ACT_TUNNEL_KEY=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
+CONFIG_CRYPTO_SM4=y
index 13350cd5c8ac35ddfbe278145e7bcae264296b88..8e67a252b672f853d8483787553e508b493e3f6c 100755 (executable)
@@ -289,6 +289,12 @@ set_sysctl()
        run_cmd sysctl -q -w $*
 }
 
+# get sysctl values in NS-A
+get_sysctl()
+{
+       ${NSA_CMD} sysctl -n $*
+}
+
 ################################################################################
 # Setup for tests
 
@@ -1003,6 +1009,60 @@ ipv4_tcp_md5()
        run_cmd nettest -s -I ${NSA_DEV} -M ${MD5_PW} -m ${NS_NET}
        log_test $? 1 "MD5: VRF: Device must be a VRF - prefix"
 
+       test_ipv4_md5_vrf__vrf_server__no_bind_ifindex
+       test_ipv4_md5_vrf__global_server__bind_ifindex0
+}
+
+test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
+{
+       log_start
+       show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
+
+       log_start
+       show_hint "Binding both the socket and the key is not required but it works"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
+}
+
+test_ipv4_md5_vrf__global_server__bind_ifindex0()
+{
+       # This particular test needs tcp_l3mdev_accept=1 for Global server to accept VRF connections
+       local old_tcp_l3mdev_accept
+       old_tcp_l3mdev_accept=$(get_sysctl net.ipv4.tcp_l3mdev_accept)
+       set_sysctl net.ipv4.tcp_l3mdev_accept=1
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
+       log_start
+
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
+
+       # restore value
+       set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
 }
 
 ipv4_tcp_novrf()
index d97bd6889446df20a6011dd3f8f911267db0f0f0..72ee644d47bfa035ace08cdb68ef2e88eaad9a1c 100644 (file)
@@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
        gre_inner_v4_multipath.sh \
        gre_inner_v6_multipath.sh \
        gre_multipath.sh \
+       ip6_forward_instats_vrf.sh \
        ip6gre_inner_v4_multipath.sh \
        ip6gre_inner_v6_multipath.sh \
        ipip_flat_gre_key.sh \
index b802c14d295096d5e656ed4ea06b2da4287d2e6f..e5e2fbeca22ec6cad75ea0bb2acf2c583fb6a144 100644 (file)
@@ -39,3 +39,5 @@ NETIF_CREATE=yes
 # Timeout (in seconds) before ping exits regardless of how many packets have
 # been sent or received
 PING_TIMEOUT=5
+# IPv6 traceroute utility name.
+TROUTE6=traceroute6
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
new file mode 100755 (executable)
index 0000000..9f5b3e2
--- /dev/null
@@ -0,0 +1,172 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test ipv6 stats on the incoming if when forwarding with VRF
+
+ALL_TESTS="
+       ipv6_ping
+       ipv6_in_too_big_err
+       ipv6_in_hdr_err
+       ipv6_in_addr_err
+       ipv6_in_discard
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 2001:1:1::2/64
+       ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+}
+
+h1_destroy()
+{
+       ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+       simple_if_fini $h1 2001:1:1::2/64
+}
+
+router_create()
+{
+       vrf_create router
+       __simple_if_init $rtr1 router 2001:1:1::1/64
+       __simple_if_init $rtr2 router 2001:1:2::1/64
+       mtu_set $rtr2 1280
+}
+
+router_destroy()
+{
+       mtu_restore $rtr2
+       __simple_if_fini $rtr2 2001:1:2::1/64
+       __simple_if_fini $rtr1 2001:1:1::1/64
+       vrf_destroy router
+}
+
+h2_create()
+{
+       simple_if_init $h2 2001:1:2::2/64
+       ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       mtu_set $h2 1280
+}
+
+h2_destroy()
+{
+       mtu_restore $h2
+       ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       simple_if_fini $h2 2001:1:2::2/64
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       rtr1=${NETIFS[p2]}
+
+       rtr2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+       h1_create
+       router_create
+       h2_create
+
+       forwarding_enable
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       forwarding_restore
+
+       h2_destroy
+       router_destroy
+       h1_destroy
+       vrf_cleanup
+}
+
+ipv6_in_too_big_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send too big packets
+       ip vrf exec $vrf_name \
+               $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InTooBigErrors"
+}
+
+ipv6_in_hdr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send packets with hop limit 1; traceroute6 is the easiest way, as some
+       # ping6 implementations don't allow the hop limit to be specified
+       ip vrf exec $vrf_name \
+               $TROUTE6 2001:1:2::2 &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InHdrErrors"
+}
+
+ipv6_in_addr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Disable forwarding temporarily while sending the packet
+       sysctl -qw net.ipv6.conf.all.forwarding=0
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       sysctl -qw net.ipv6.conf.all.forwarding=1
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InAddrErrors"
+}
+
+ipv6_in_discard()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       local vrf_name=$(master_name_get $h1)
+
+       # Add a policy to discard
+       ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       ip xfrm policy del dst 2001:1:2::2/128 dir fwd
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InDiscards"
+}
+ipv6_ping()
+{
+       RET=0
+
+       ping6_test $h1 2001:1:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
index e7fc5c35b569029f196b1d3f7eaea58c22365306..92087d423bcf1d4950c1e28c284e265d30aaff17 100644 (file)
@@ -751,6 +751,14 @@ qdisc_parent_stats_get()
            | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
 }
 
+ipv6_stats_get()
+{
+       local dev=$1; shift
+       local stat=$1; shift
+
+       cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
+}
+
 humanize()
 {
        local speed=$1; shift
index 3caf72bb9c6a1241511bdb0b7f72c77ddce174b8..a2489ec398fe01325e23f17ac253e6c4002e27bc 100755 (executable)
@@ -468,10 +468,26 @@ out_bits()
   for i in {0..22}
   do
     ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
-           prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
-
-    run_test "out_bit$i" "${desc/<n>/$i}" ioam-node-alpha ioam-node-beta \
-           db01::2 db01::1 veth0 ${bit2type[$i]} 123
+           prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
+           dev veth0 &>/dev/null
+
+    local cmd_res=$?
+    local descr="${desc/<n>/$i}"
+
+    if [[ $i -ge 12 && $i -le 21 ]]
+    then
+      if [ $cmd_res != 0 ]
+      then
+        npassed=$((npassed+1))
+        log_test_passed "$descr"
+      else
+        nfailed=$((nfailed+1))
+        log_test_failed "$descr"
+      fi
+    else
+      run_test "out_bit$i" "$descr" ioam-node-alpha ioam-node-beta \
+             db01::2 db01::1 veth0 ${bit2type[$i]} 123
+    fi
   done
 
   bit2size[22]=$tmp
@@ -544,7 +560,7 @@ in_bits()
   local tmp=${bit2size[22]}
   bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) ))
 
-  for i in {0..22}
+  for i in {0..11} {22..22}
   do
     ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
            prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
index d376cb2c383c3821270fa657a6025ae5471857be..8f6997d3581612ba7379349594c79e00ebaa6e36 100644 (file)
@@ -94,16 +94,6 @@ enum {
        TEST_OUT_BIT9,
        TEST_OUT_BIT10,
        TEST_OUT_BIT11,
-       TEST_OUT_BIT12,
-       TEST_OUT_BIT13,
-       TEST_OUT_BIT14,
-       TEST_OUT_BIT15,
-       TEST_OUT_BIT16,
-       TEST_OUT_BIT17,
-       TEST_OUT_BIT18,
-       TEST_OUT_BIT19,
-       TEST_OUT_BIT20,
-       TEST_OUT_BIT21,
        TEST_OUT_BIT22,
        TEST_OUT_FULL_SUPP_TRACE,
 
@@ -125,16 +115,6 @@ enum {
        TEST_IN_BIT9,
        TEST_IN_BIT10,
        TEST_IN_BIT11,
-       TEST_IN_BIT12,
-       TEST_IN_BIT13,
-       TEST_IN_BIT14,
-       TEST_IN_BIT15,
-       TEST_IN_BIT16,
-       TEST_IN_BIT17,
-       TEST_IN_BIT18,
-       TEST_IN_BIT19,
-       TEST_IN_BIT20,
-       TEST_IN_BIT21,
        TEST_IN_BIT22,
        TEST_IN_FULL_SUPP_TRACE,
 
@@ -199,30 +179,6 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h,
                       ioam6h->nodelen != 2 ||
                       ioam6h->remlen;
 
-       case TEST_OUT_BIT12:
-       case TEST_IN_BIT12:
-       case TEST_OUT_BIT13:
-       case TEST_IN_BIT13:
-       case TEST_OUT_BIT14:
-       case TEST_IN_BIT14:
-       case TEST_OUT_BIT15:
-       case TEST_IN_BIT15:
-       case TEST_OUT_BIT16:
-       case TEST_IN_BIT16:
-       case TEST_OUT_BIT17:
-       case TEST_IN_BIT17:
-       case TEST_OUT_BIT18:
-       case TEST_IN_BIT18:
-       case TEST_OUT_BIT19:
-       case TEST_IN_BIT19:
-       case TEST_OUT_BIT20:
-       case TEST_IN_BIT20:
-       case TEST_OUT_BIT21:
-       case TEST_IN_BIT21:
-               return ioam6h->overflow ||
-                      ioam6h->nodelen ||
-                      ioam6h->remlen != 1;
-
        case TEST_OUT_BIT22:
        case TEST_IN_BIT22:
                return ioam6h->overflow ||
@@ -326,6 +282,66 @@ static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h,
                *p += sizeof(__u32);
        }
 
+       if (ioam6h->type.bit12) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit13) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit14) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit15) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit16) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit17) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit18) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit19) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit20) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit21) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
        if (ioam6h->type.bit22) {
                len = cnf.sc_data ? strlen(cnf.sc_data) : 0;
                aligned = cnf.sc_data ? __ALIGN_KERNEL(len, 4) : 0;
@@ -455,26 +471,6 @@ static int str2id(const char *tname)
                return TEST_OUT_BIT10;
        if (!strcmp("out_bit11", tname))
                return TEST_OUT_BIT11;
-       if (!strcmp("out_bit12", tname))
-               return TEST_OUT_BIT12;
-       if (!strcmp("out_bit13", tname))
-               return TEST_OUT_BIT13;
-       if (!strcmp("out_bit14", tname))
-               return TEST_OUT_BIT14;
-       if (!strcmp("out_bit15", tname))
-               return TEST_OUT_BIT15;
-       if (!strcmp("out_bit16", tname))
-               return TEST_OUT_BIT16;
-       if (!strcmp("out_bit17", tname))
-               return TEST_OUT_BIT17;
-       if (!strcmp("out_bit18", tname))
-               return TEST_OUT_BIT18;
-       if (!strcmp("out_bit19", tname))
-               return TEST_OUT_BIT19;
-       if (!strcmp("out_bit20", tname))
-               return TEST_OUT_BIT20;
-       if (!strcmp("out_bit21", tname))
-               return TEST_OUT_BIT21;
        if (!strcmp("out_bit22", tname))
                return TEST_OUT_BIT22;
        if (!strcmp("out_full_supp_trace", tname))
@@ -509,26 +505,6 @@ static int str2id(const char *tname)
                return TEST_IN_BIT10;
        if (!strcmp("in_bit11", tname))
                return TEST_IN_BIT11;
-       if (!strcmp("in_bit12", tname))
-               return TEST_IN_BIT12;
-       if (!strcmp("in_bit13", tname))
-               return TEST_IN_BIT13;
-       if (!strcmp("in_bit14", tname))
-               return TEST_IN_BIT14;
-       if (!strcmp("in_bit15", tname))
-               return TEST_IN_BIT15;
-       if (!strcmp("in_bit16", tname))
-               return TEST_IN_BIT16;
-       if (!strcmp("in_bit17", tname))
-               return TEST_IN_BIT17;
-       if (!strcmp("in_bit18", tname))
-               return TEST_IN_BIT18;
-       if (!strcmp("in_bit19", tname))
-               return TEST_IN_BIT19;
-       if (!strcmp("in_bit20", tname))
-               return TEST_IN_BIT20;
-       if (!strcmp("in_bit21", tname))
-               return TEST_IN_BIT21;
        if (!strcmp("in_bit22", tname))
                return TEST_IN_BIT22;
        if (!strcmp("in_full_supp_trace", tname))
@@ -606,16 +582,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
        [TEST_OUT_BIT9]         = check_ioam_header_and_data,
        [TEST_OUT_BIT10]                = check_ioam_header_and_data,
        [TEST_OUT_BIT11]                = check_ioam_header_and_data,
-       [TEST_OUT_BIT12]                = check_ioam_header,
-       [TEST_OUT_BIT13]                = check_ioam_header,
-       [TEST_OUT_BIT14]                = check_ioam_header,
-       [TEST_OUT_BIT15]                = check_ioam_header,
-       [TEST_OUT_BIT16]                = check_ioam_header,
-       [TEST_OUT_BIT17]                = check_ioam_header,
-       [TEST_OUT_BIT18]                = check_ioam_header,
-       [TEST_OUT_BIT19]                = check_ioam_header,
-       [TEST_OUT_BIT20]                = check_ioam_header,
-       [TEST_OUT_BIT21]                = check_ioam_header,
        [TEST_OUT_BIT22]                = check_ioam_header_and_data,
        [TEST_OUT_FULL_SUPP_TRACE]      = check_ioam_header_and_data,
        [TEST_IN_UNDEF_NS]              = check_ioam_header,
@@ -633,16 +599,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
        [TEST_IN_BIT9]                  = check_ioam_header_and_data,
        [TEST_IN_BIT10]         = check_ioam_header_and_data,
        [TEST_IN_BIT11]         = check_ioam_header_and_data,
-       [TEST_IN_BIT12]         = check_ioam_header,
-       [TEST_IN_BIT13]         = check_ioam_header,
-       [TEST_IN_BIT14]         = check_ioam_header,
-       [TEST_IN_BIT15]         = check_ioam_header,
-       [TEST_IN_BIT16]         = check_ioam_header,
-       [TEST_IN_BIT17]         = check_ioam_header,
-       [TEST_IN_BIT18]         = check_ioam_header,
-       [TEST_IN_BIT19]         = check_ioam_header,
-       [TEST_IN_BIT20]         = check_ioam_header,
-       [TEST_IN_BIT21]         = check_ioam_header,
        [TEST_IN_BIT22]         = check_ioam_header_and_data,
        [TEST_IN_FULL_SUPP_TRACE]       = check_ioam_header_and_data,
        [TEST_FWD_FULL_SUPP_TRACE]      = check_ioam_header_and_data,
index bd6288302094b5aac09ca26f514cc5b85ec46086..b599003eb5ba873c2c56edeede2e4ce9313cd5b8 100644 (file)
@@ -28,6 +28,7 @@
 #include <unistd.h>
 #include <time.h>
 #include <errno.h>
+#include <getopt.h>
 
 #include <linux/xfrm.h>
 #include <linux/ipsec.h>
@@ -101,6 +102,8 @@ struct sock_args {
                struct sockaddr_in6 v6;
        } md5_prefix;
        unsigned int prefix_len;
+       /* 0: default, -1: force off, +1: force on */
+       int bind_key_ifindex;
 
        /* expected addresses and device index for connection */
        const char *expected_dev;
@@ -271,11 +274,14 @@ static int tcp_md5sig(int sd, void *addr, socklen_t alen, struct sock_args *args
        }
        memcpy(&md5sig.tcpm_addr, addr, alen);
 
-       if (args->ifindex) {
+       if ((args->ifindex && args->bind_key_ifindex >= 0) || args->bind_key_ifindex >= 1) {
                opt = TCP_MD5SIG_EXT;
                md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
 
                md5sig.tcpm_ifindex = args->ifindex;
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX set tcpm_ifindex=%d\n", md5sig.tcpm_ifindex);
+       } else {
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX off\n", md5sig.tcpm_ifindex);
        }
 
        rc = setsockopt(sd, IPPROTO_TCP, opt, &md5sig, sizeof(md5sig));
@@ -1822,6 +1828,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
 }
 
 #define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
+#define OPT_FORCE_BIND_KEY_IFINDEX 1001
+#define OPT_NO_BIND_KEY_IFINDEX 1002
+
+static struct option long_opts[] = {
+       {"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
+       {"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
+       {0, 0, 0, 0}
+};
 
 static void print_usage(char *prog)
 {
@@ -1858,6 +1872,10 @@ static void print_usage(char *prog)
        "    -M password   use MD5 sum protection\n"
        "    -X password   MD5 password for client mode\n"
        "    -m prefix/len prefix and length to use for MD5 key\n"
+       "    --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
+       "    --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
+       "        (default: only if -I is passed)\n"
+       "\n"
        "    -g grp        multicast group (e.g., 239.1.1.1)\n"
        "    -i            interactive mode (default is echo and terminate)\n"
        "\n"
@@ -1893,7 +1911,7 @@ int main(int argc, char *argv[])
         * process input args
         */
 
-       while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) {
+       while ((rc = getopt_long(argc, argv, GETOPT_STR, long_opts, NULL)) != -1) {
                switch (rc) {
                case 'B':
                        both_mode = 1;
@@ -1966,6 +1984,12 @@ int main(int argc, char *argv[])
                case 'M':
                        args.password = optarg;
                        break;
+               case OPT_FORCE_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = 1;
+                       break;
+               case OPT_NO_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = -1;
+                       break;
                case 'X':
                        args.client_pw = optarg;
                        break;
index 427d94816f2d671fed228d2492fe880f919591f6..d4ffebb989f88d2e63b14f2ccd9fe6644d3f21b7 100755 (executable)
@@ -199,7 +199,6 @@ fi
 # test basic connectivity
 if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
   echo "ERROR: ns1 cannot reach ns2" 1>&2
-  bash
   exit 1
 fi
 
index d7e07f4c3d7fc332c1c67a039539bc51999834d4..da1c1e4b6c86bf87ccfd8a01f4dbd99295081367 100755 (executable)
@@ -741,6 +741,149 @@ EOF
        return $lret
 }
 
+# test port shadowing.
+# create two listening services, one on router (ns0), one
+# on client (ns2), which is masqueraded from ns1 point of view.
+# ns2 sends udp packet coming from service port to ns1, on a highport.
+# Later, if n1 uses same highport to connect to ns0:service, packet
+# might be port-forwarded to ns2 instead.
+
+# second argument tells if we expect the 'fake-entry' to take effect
+# (CLIENT) or not (ROUTER).
+test_port_shadow()
+{
+       local test=$1
+       local expect=$2
+       local daddrc="10.0.1.99"
+       local daddrs="10.0.1.1"
+       local result=""
+       local logmsg=""
+
+       echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+       nc_r=$!
+
+       echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+       nc_c=$!
+
+       # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
+       echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+
+       # ns1 tries to connect to ns0:1405.  With default settings this should connect
+       # to client, it matches the conntrack entry created above.
+
+       result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+
+       if [ "$result" = "$expect" ] ;then
+               echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
+       else
+               echo "ERROR: portshadow test $test: got reply from \"$result\", not $expect as intended"
+               ret=1
+       fi
+
+       kill $nc_r $nc_c 2>/dev/null
+
+       # flush udp entries for next test round, if any
+       ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
+}
+
+# This prevents port shadow of router service via packet filter,
+# packets claiming to originate from service port from internal
+# network are dropped.
+test_port_shadow_filter()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family filter {
+       chain forward {
+               type filter hook forward priority 0; policy accept;
+               meta iif veth1 udp sport 1405 drop
+       }
+}
+EOF
+       test_port_shadow "port-filter" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family filter
+}
+
+# This prevents port shadow of router service via notrack.
+test_port_shadow_notrack()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family raw {
+       chain prerouting {
+               type filter hook prerouting priority -300; policy accept;
+               meta iif veth0 udp dport 1405 notrack
+               udp dport 1405 notrack
+       }
+       chain output {
+               type filter hook output priority -300; policy accept;
+               udp sport 1405 notrack
+       }
+}
+EOF
+       test_port_shadow "port-notrack" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family raw
+}
+
+# This prevents port shadow of router service via sport remap.
+test_port_shadow_pat()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family pat {
+       chain postrouting {
+               type nat hook postrouting priority -1; policy accept;
+               meta iif veth1 udp sport <= 1405 masquerade to : 1406-65535 random
+       }
+}
+EOF
+       test_port_shadow "pat" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family pat
+}
+
+test_port_shadowing()
+{
+       local family="ip"
+
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+       ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               meta oif veth0 masquerade
+       }
+}
+EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add add $family masquerade hook"
+               return $ksft_skip
+       fi
+
+       # test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
+       test_port_shadow "default" "CLIENT"
+
+       # test packet filter based mitigation: prevent forwarding of
+       # packets claiming to come from the service port.
+       test_port_shadow_filter "$family"
+
+       # test conntrack based mitigation: connections going or coming
+       # from router:service bypass connection tracking.
+       test_port_shadow_notrack "$family"
+
+       # test nat based mitigation: forwarded packets coming from service port
+       # are masqueraded with random highport.
+       test_port_shadow_pat "$family"
+
+       ip netns exec "$ns0" nft delete table $family nat
+}
 
 # ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
 for i in 0 1 2; do
@@ -861,6 +1004,8 @@ reset_counters
 $test_inet_nat && test_redirect inet
 $test_inet_nat && test_redirect6 inet
 
+test_port_shadowing
+
 if [ $ret -ne 0 ];then
        echo -n "FAIL: "
        nft --version
diff --git a/tools/testing/selftests/netfilter/nft_nat_zones.sh b/tools/testing/selftests/netfilter/nft_nat_zones.sh
new file mode 100755 (executable)
index 0000000..b9ab373
--- /dev/null
@@ -0,0 +1,309 @@
+#!/bin/bash
+#
+# Test connection tracking zone and NAT source port reallocation support.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Don't increase too much, 2000 clients should work
+# just fine but script can then take several minutes with
+# KASAN/debug builds.
+maxclients=100
+
+have_iperf=1
+ret=0
+
+# client1---.
+#            veth1-.
+#                  |
+#               NAT Gateway --veth0--> Server
+#                  | |
+#            veth2-' |
+# client2---'        |
+#  ....              |
+# clientX----vethX---'
+
+# All clients share identical IP address.
+# NAT Gateway uses policy routing and conntrack zones to isolate client
+# namespaces.  Each client connects to Server, each with colliding tuples:
+#   clientsaddr:10000 -> serveraddr:dport
+#   NAT Gateway is supposed to do port reallocation for each of the
+#   connections.
+
+sfx=$(mktemp -u "XXXXXXXX")
+gw="ns-gw-$sfx"
+cl1="ns-cl1-$sfx"
+cl2="ns-cl2-$sfx"
+srv="ns-srv-$sfx"
+
+v4gc1=$(sysctl -n net.ipv4.neigh.default.gc_thresh1 2>/dev/null)
+v4gc2=$(sysctl -n net.ipv4.neigh.default.gc_thresh2 2>/dev/null)
+v4gc3=$(sysctl -n net.ipv4.neigh.default.gc_thresh3 2>/dev/null)
+v6gc1=$(sysctl -n net.ipv6.neigh.default.gc_thresh1 2>/dev/null)
+v6gc2=$(sysctl -n net.ipv6.neigh.default.gc_thresh2 2>/dev/null)
+v6gc3=$(sysctl -n net.ipv6.neigh.default.gc_thresh3 2>/dev/null)
+
+cleanup()
+{
+       ip netns del $gw
+       ip netns del $srv
+       for i in $(seq 1 $maxclients); do
+               ip netns del ns-cl$i-$sfx 2>/dev/null
+       done
+
+       sysctl -q net.ipv4.neigh.default.gc_thresh1=$v4gc1 2>/dev/null
+       sysctl -q net.ipv4.neigh.default.gc_thresh2=$v4gc2 2>/dev/null
+       sysctl -q net.ipv4.neigh.default.gc_thresh3=$v4gc3 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh1=$v6gc1 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh2=$v6gc2 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh3=$v6gc3 2>/dev/null
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without conntrack tool"
+       exit $ksft_skip
+fi
+
+iperf3 -v >/dev/null 2>&1
+if [ $? -ne 0 ];then
+       have_iperf=0
+fi
+
+ip netns add "$gw"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $gw"
+       exit $ksft_skip
+fi
+ip -net "$gw" link set lo up
+
+trap cleanup EXIT
+
+ip netns add "$srv"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create server netns $srv"
+       exit $ksft_skip
+fi
+
+ip link add veth0 netns "$gw" type veth peer name eth0 netns "$srv"
+ip -net "$gw" link set veth0 up
+ip -net "$srv" link set lo up
+ip -net "$srv" link set eth0 up
+
+sysctl -q net.ipv6.neigh.default.gc_thresh1=512  2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh3=4096 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh1=512  2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh3=4096 2>/dev/null
+
+for i in $(seq 1 $maxclients);do
+  cl="ns-cl$i-$sfx"
+
+  ip netns add "$cl"
+  if [ $? -ne 0 ];then
+     echo "SKIP: Could not create client netns $cl"
+     exit $ksft_skip
+  fi
+  ip link add veth$i netns "$gw" type veth peer name eth0 netns "$cl" > /dev/null 2>&1
+  if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+  fi
+done
+
+for i in $(seq 1 $maxclients);do
+  cl="ns-cl$i-$sfx"
+  echo netns exec "$cl" ip link set lo up
+  echo netns exec "$cl" ip link set eth0 up
+  echo netns exec "$cl" sysctl -q net.ipv4.tcp_syn_retries=2
+  echo netns exec "$gw" ip link set veth$i up
+  echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.arp_ignore=2
+  echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.rp_filter=0
+
+  # clients have same IP addresses.
+  echo netns exec "$cl" ip addr add 10.1.0.3/24 dev eth0
+  echo netns exec "$cl" ip addr add dead:1::3/64 dev eth0
+  echo netns exec "$cl" ip route add default via 10.1.0.2 dev eth0
+  echo netns exec "$cl" ip route add default via dead:1::2 dev eth0
+
+  # NB: same addresses on client-facing interfaces.
+  echo netns exec "$gw" ip addr add 10.1.0.2/24 dev veth$i
+  echo netns exec "$gw" ip addr add dead:1::2/64 dev veth$i
+
+  # gw: policy routing
+  echo netns exec "$gw" ip route add 10.1.0.0/24 dev veth$i table $((1000+i))
+  echo netns exec "$gw" ip route add dead:1::0/64 dev veth$i table $((1000+i))
+  echo netns exec "$gw" ip route add 10.3.0.0/24 dev veth0 table $((1000+i))
+  echo netns exec "$gw" ip route add dead:3::0/64 dev veth0 table $((1000+i))
+  echo netns exec "$gw" ip rule add fwmark $i lookup $((1000+i))
+done | ip -batch /dev/stdin
+
+ip -net "$gw" addr add 10.3.0.1/24 dev veth0
+ip -net "$gw" addr add dead:3::1/64 dev veth0
+
+ip -net "$srv" addr add 10.3.0.99/24 dev eth0
+ip -net "$srv" addr add dead:3::99/64 dev eth0
+
+ip netns exec $gw nft -f /dev/stdin<<EOF
+table inet raw {
+       map iiftomark {
+               type ifname : mark
+       }
+
+       map iiftozone {
+               typeof iifname : ct zone
+       }
+
+       set inicmp {
+               flags dynamic
+               type ipv4_addr . ifname . ipv4_addr
+       }
+       set inflows {
+               flags dynamic
+               type ipv4_addr . inet_service . ifname . ipv4_addr . inet_service
+       }
+
+       set inflows6 {
+               flags dynamic
+               type ipv6_addr . inet_service . ifname . ipv6_addr . inet_service
+       }
+
+       chain prerouting {
+               type filter hook prerouting priority -64000; policy accept;
+               ct original zone set meta iifname map @iiftozone
+               meta mark set meta iifname map @iiftomark
+
+               tcp flags & (syn|ack) == ack add @inflows { ip saddr . tcp sport . meta iifname . ip daddr . tcp dport counter }
+               add @inflows6 { ip6 saddr . tcp sport . meta iifname . ip6 daddr . tcp dport counter }
+               ip protocol icmp add @inicmp { ip saddr . meta iifname . ip daddr counter }
+       }
+
+       chain nat_postrouting {
+               type nat hook postrouting priority 0; policy accept;
+                ct mark set meta mark meta oifname veth0 masquerade
+       }
+
+       chain mangle_prerouting {
+               type filter hook prerouting priority -100; policy accept;
+               ct direction reply meta mark set ct mark
+       }
+}
+EOF
+
+( echo add element inet raw iiftomark \{
+       for i in $(seq 1 $((maxclients-1))); do
+               echo \"veth$i\" : $i,
+       done
+       echo \"veth$maxclients\" : $maxclients \}
+       echo add element inet raw iiftozone \{
+       for i in $(seq 1 $((maxclients-1))); do
+               echo \"veth$i\" : $i,
+       done
+       echo \"veth$maxclients\" : $maxclients \}
+) | ip netns exec $gw nft -f /dev/stdin
+
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.rp_filter=0 >/dev/null
+
+# useful for debugging: allows to use 'ping' from clients to gateway.
+ip netns exec "$gw" sysctl -q net.ipv4.fwmark_reflect=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.fwmark_reflect=1 > /dev/null
+
+for i in $(seq 1 $maxclients); do
+  cl="ns-cl$i-$sfx"
+  ip netns exec $cl ping -i 0.5 -q -c 3 10.3.0.99 > /dev/null 2>&1 &
+  if [ $? -ne 0 ]; then
+     echo FAIL: Ping failure from $cl 1>&2
+     ret=1
+     break
+  fi
+done
+
+wait
+
+for i in $(seq 1 $maxclients); do
+   ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" | grep -q "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 counter packets 3 bytes 252 }"
+   if [ $? -ne 0 ];then
+      ret=1
+      echo "FAIL: counter icmp mismatch for veth$i" 1>&2
+      ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" 1>&2
+      break
+   fi
+done
+
+ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" | grep -q "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+if [ $? -ne 0 ];then
+    ret=1
+    echo "FAIL: counter icmp mismatch for veth0: { 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+    ip netns exec $gw nft get element inet raw inicmp "{ 10.3.99 . \"veth0\" . 10.3.0.1 }" 1>&2
+fi
+
+if  [ $ret -eq 0 ]; then
+       echo "PASS: ping test from all $maxclients namespaces"
+fi
+
+if [ $have_iperf -eq 0 ];then
+       echo "SKIP: iperf3 not installed"
+       if [ $ret -ne 0 ];then
+           exit $ret
+       fi
+       exit $ksft_skip
+fi
+
+ip netns exec $srv iperf3 -s > /dev/null 2>&1 &
+iperfpid=$!
+sleep 1
+
+for i in $(seq 1 $maxclients); do
+  if [ $ret -ne 0 ]; then
+     break
+  fi
+  cl="ns-cl$i-$sfx"
+  ip netns exec $cl iperf3 -c 10.3.0.99 --cport 10000 -n 1 > /dev/null
+  if [ $? -ne 0 ]; then
+     echo FAIL: Failure to connect for $cl 1>&2
+     ip netns exec $gw conntrack -S 1>&2
+     ret=1
+  fi
+done
+if [ $ret -eq 0 ];then
+       echo "PASS: iperf3 connections for all $maxclients net namespaces"
+fi
+
+kill $iperfpid
+wait
+
+for i in $(seq 1 $maxclients); do
+   ip netns exec $gw nft get element inet raw inflows "{ 10.1.0.3 . 10000 . \"veth$i\" . 10.3.0.99 . 5201 }" > /dev/null
+   if [ $? -ne 0 ];then
+      ret=1
+      echo "FAIL: can't find expected tcp entry for veth$i" 1>&2
+      break
+   fi
+done
+if [ $ret -eq 0 ];then
+       echo "PASS: Found client connection for all $maxclients net namespaces"
+fi
+
+ip netns exec $gw nft get element inet raw inflows "{ 10.3.0.99 . 5201 . \"veth0\" . 10.3.0.1 . 10000 }" > /dev/null
+if [ $? -ne 0 ];then
+    ret=1
+    echo "FAIL: cannot find return entry on veth0" 1>&2
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
new file mode 100755 (executable)
index 0000000..ac64637
--- /dev/null
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# Test insertion speed for packets with identical addresses/ports
+# that are all placed in distinct conntrack zones.
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns="ns-$sfx"
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+zones=20000
+have_ct_tool=0
+ret=0
+
+cleanup()
+{
+       ip netns del $ns
+}
+
+ip netns add $ns
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $gw"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+conntrack -V > /dev/null 2>&1
+if [ $? -eq 0 ];then
+       have_ct_tool=1
+fi
+
+ip -net "$ns" link set lo up
+
+test_zones() {
+       local max_zones=$1
+
+ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
+ip netns exec $ns nft -f /dev/stdin<<EOF
+flush ruleset
+table inet raw {
+       map rndzone {
+               typeof numgen inc mod $max_zones : ct zone
+       }
+
+       chain output {
+               type filter hook output priority -64000; policy accept;
+               udp dport 12345  ct zone set numgen inc mod 65536 map @rndzone
+       }
+}
+EOF
+       (
+               echo "add element inet raw rndzone {"
+       for i in $(seq 1 $max_zones);do
+               echo -n "$i : $i"
+               if [ $i -lt $max_zones ]; then
+                       echo ","
+               else
+                       echo "}"
+               fi
+       done
+       ) | ip netns exec $ns nft -f /dev/stdin
+
+       local i=0
+       local j=0
+       local outerstart=$(date +%s%3N)
+       local stop=$outerstart
+
+       while [ $i -lt $max_zones ]; do
+               local start=$(date +%s%3N)
+               i=$((i + 10000))
+               j=$((j + 1))
+               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+               if [ $? -ne 0 ] ;then
+                       ret=1
+                       break
+               fi
+
+               stop=$(date +%s%3N)
+               local duration=$((stop-start))
+               echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+       done
+
+       if [ $have_ct_tool -eq 1 ]; then
+               local count=$(ip netns exec "$ns" conntrack -C)
+               local duration=$((stop-outerstart))
+
+               if [ $count -eq $max_zones ]; then
+                       echo "PASS: inserted $count entries from packet path in $duration ms total"
+               else
+                       ip netns exec $ns conntrack -S 1>&2
+                       echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
+                       ret=1
+               fi
+       fi
+
+       if [ $ret -ne 0 ];then
+               echo "FAIL: insert $max_zones entries from packet path" 1>&2
+       fi
+}
+
+test_conntrack_tool() {
+       local max_zones=$1
+
+       ip netns exec $ns conntrack -F >/dev/null 2>/dev/null
+
+       local outerstart=$(date +%s%3N)
+       local start=$(date +%s%3N)
+       local stop=$start
+       local i=0
+       while [ $i -lt $max_zones ]; do
+               i=$((i + 1))
+               ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+                        --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
+               if [ $? -ne 0 ];then
+                       ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+                        --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
+                       echo "FAIL: conntrack -I returned an error"
+                       ret=1
+                       break
+               fi
+
+               if [ $((i%10000)) -eq 0 ];then
+                       stop=$(date +%s%3N)
+
+                       local duration=$((stop-start))
+                       echo "PASS: added 10000 entries in $duration ms (now $i total)"
+                       start=$stop
+               fi
+       done
+
+       local count=$(ip netns exec "$ns" conntrack -C)
+       local duration=$((stop-outerstart))
+
+       if [ $count -eq $max_zones ]; then
+               echo "PASS: inserted $count entries via ctnetlink in $duration ms"
+       else
+               ip netns exec $ns conntrack -S 1>&2
+               echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries ($duration ms)"
+               ret=1
+       fi
+}
+
+test_zones $zones
+
+if [ $have_ct_tool -eq 1 ];then
+       test_conntrack_tool $zones
+else
+       echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
+       if [ $ret -eq 0 ];then
+               exit $ksft_skip
+       fi
+fi
+
+exit $ret
index 10ab56c2484ae6211e30b6e58b389bcb56130380..60aa1a4fc69b63e165d241b4c14bce5f20c8a13d 100644 (file)
@@ -414,9 +414,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
        uffd_test_ops->allocate_area((void **)&area_src);
        uffd_test_ops->allocate_area((void **)&area_dst);
 
-       uffd_test_ops->release_pages(area_src);
-       uffd_test_ops->release_pages(area_dst);
-
        userfaultfd_open(features);
 
        count_verify = malloc(nr_pages * sizeof(unsigned long long));
@@ -437,6 +434,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
                *(area_count(area_src, nr) + 1) = 1;
        }
 
+       /*
+        * After initialization of area_src, we must explicitly release pages
+        * for area_dst to make sure it's fully empty.  Otherwise we could have
+        * some area_dst pages be erroneously initialized with zero pages,
+        * hence we could hit memory corruption later in the test.
+        *
+        * One example is when THP is globally enabled, above allocate_area()
+        * calls could have the two areas merged into a single VMA (as they
+        * will have the same VMA flags so they're mergeable).  When we
+        * initialize the area_src above, it's possible that some part of
+        * area_dst could have been faulted in via one huge THP that will be
+        * shared between area_src and area_dst.  It could cause some of the
+        * area_dst won't be trapped by missing userfaults.
+        *
+        * This release_pages() will guarantee even if that happened, we'll
+        * proactively split the thp and drop any accidentally initialized
+        * pages within area_dst.
+        */
+       uffd_test_ops->release_pages(area_dst);
+
        pipefd = malloc(sizeof(int) * nr_cpus * 2);
        if (!pipefd)
                err("pipefd");
index cec6f5a738e1e4325c0f06809bf410ad7f3d4806..fa927ad16f8a227bf5677e16145f58598208e643 100644 (file)
@@ -332,8 +332,6 @@ static void test_no_sockets(const struct test_opts *opts)
        read_vsock_stat(&sockets);
 
        check_no_sockets(&sockets);
-
-       free_sock_stat(&sockets);
 }
 
 static void test_listen_socket_server(const struct test_opts *opts)
index 439d3b4cd1a941ea8aca486ccc7e6c2b50241e48..7851f3a1b5f7c0c5ea59d808b40d39ef7cf0fd8a 100644 (file)
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-       if (unlikely(!cpus))
+       const struct cpumask *cpus;
+
+       if (likely(cpumask_available(tmp)))
+               cpus = tmp;
+       else
                cpus = cpu_online_mask;
 
        if (cpumask_empty(cpus))
@@ -263,14 +267,34 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                        continue;
 
                kvm_make_request(req, vcpu);
-               cpu = vcpu->cpu;
 
                if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                        continue;
 
-               if (tmp != NULL && cpu != -1 && cpu != me &&
-                   kvm_request_needs_ipi(vcpu, req))
-                       __cpumask_set_cpu(cpu, tmp);
+               /*
+                * tmp can be "unavailable" if cpumasks are allocated off stack
+                * as allocation of the mask is deliberately not fatal and is
+                * handled by falling back to kicking all online CPUs.
+                */
+               if (!cpumask_available(tmp))
+                       continue;
+
+               /*
+                * Note, the vCPU could get migrated to a different pCPU at any
+                * point after kvm_request_needs_ipi(), which could result in
+                * sending an IPI to the previous pCPU.  But, that's ok because
+                * the purpose of the IPI is to ensure the vCPU returns to
+                * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+                * Entering READING_SHADOW_PAGE_TABLES after this point is also
+                * ok, as the requirement is only that KVM wait for vCPUs that
+                * were reading SPTEs _before_ any changes were finalized.  See
+                * kvm_vcpu_kick() for more details on handling requests.
+                */
+               if (kvm_request_needs_ipi(vcpu, req)) {
+                       cpu = READ_ONCE(vcpu->cpu);
+                       if (cpu != -1 && cpu != me)
+                               __cpumask_set_cpu(cpu, tmp);
+               }
        }
 
        called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
@@ -302,13 +326,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-       /*
-        * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-        * kvm_make_all_cpus_request.
-        */
-       long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
        ++kvm->stat.generic.remote_tlb_flush_requests;
+
        /*
         * We want to publish modifications to the page tables before reading
         * mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +342,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
        if (!kvm_arch_flush_remote_tlb(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.generic.remote_tlb_flush;
-       cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
@@ -528,7 +546,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                }
        }
 
-       if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+       if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);
 
        if (locked)
@@ -3134,15 +3152,19 @@ out:
 
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-       unsigned int old, val, shrink;
+       unsigned int old, val, shrink, grow_start;
 
        old = val = vcpu->halt_poll_ns;
        shrink = READ_ONCE(halt_poll_ns_shrink);
+       grow_start = READ_ONCE(halt_poll_ns_grow_start);
        if (shrink == 0)
                val = 0;
        else
                val /= shrink;
 
+       if (val < grow_start)
+               val = 0;
+
        vcpu->halt_poll_ns = val;
        trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
@@ -3290,16 +3312,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  */
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-       int me;
-       int cpu = vcpu->cpu;
+       int me, cpu;
 
        if (kvm_vcpu_wake_up(vcpu))
                return;
 
+       /*
+        * Note, the vCPU could get migrated to a different pCPU at any point
+        * after kvm_arch_vcpu_should_kick(), which could result in sending an
+        * IPI to the previous pCPU.  But, that's ok because the purpose of the
+        * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
+        * vCPU also requires it to leave IN_GUEST_MODE.
+        */
        me = get_cpu();
-       if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-               if (kvm_arch_vcpu_should_kick(vcpu))
+       if (kvm_arch_vcpu_should_kick(vcpu)) {
+               cpu = READ_ONCE(vcpu->cpu);
+               if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                        smp_send_reschedule(cpu);
+       }
        put_cpu();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);